def cleanup(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None, destroy_vifs=True,
            destroy_secrets=True):
    # cleanup() should not be called when the guest has not been destroyed.
    if instance.uuid in self.instances:
        raise exception.InstanceExists(
            "Instance %s has not been destroyed." % instance.uuid)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info):
    """Create a new VM on the virtualization platform.

    Once this successfully completes, the instance should be
    running (power_state.RUNNING).
    """
    LOG.info(i18n._("Got request to spawn instance"), instance=instance)
    if self.instance_exists(instance):
        raise exception.InstanceExists(name=instance.name)

    try:
        self.create_instance(instance, image_meta, network_info)
        root_path = None
        if not volumeutils.ebs_root_in_block_devices(block_device_info):
            root_path = self.create_root_disk(context, instance)
        ephemeral_path = self.create_ephemeral_disk(instance)
        self.storage_setup(instance, root_path, ephemeral_path,
                           block_device_info)
        # TODO(alexandrucoman): Create the config drive
    except vbox_exc.VBoxException:
        with excutils.save_and_reraise_exception():
            self.destroy(instance)

    LOG.info(i18n._("The instance was successfully spawned!"),
             instance=instance)
def spawn(self, instance):
    """Create a new VM and start it."""
    vm = self._lookup(instance.name)
    if vm is not None:
        raise exception.InstanceExists(name=instance.name)

    user = manager.AuthManager().get_user(instance['user_id'])
    project = manager.AuthManager().get_project(instance['project_id'])
    # Fetch the file, assume it is a VHD file.
    base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name)
    vhdfile = "%s.vhd" % (base_vhd_filename)
    images.fetch(instance['image_id'], vhdfile, user, project)

    try:
        self._create_vm(instance)
        self._create_disk(instance['name'], vhdfile)
        self._create_nic(instance['name'], instance['mac_address'])
        LOG.debug(_('Starting VM %s '), instance.name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance.name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info(_("Spawning new instance"), instance=instance)

    instance_name = instance['name']
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(instance_name)

    if self._volumeops.ebs_root_in_block_devices(block_device_info):
        root_vhd_path = None
    else:
        root_vhd_path = self._create_root_vhd(context, instance)

    eph_vhd_path = self.create_ephemeral_vhd(instance)

    try:
        self.create_instance(instance, network_info, block_device_info,
                             root_vhd_path, eph_vhd_path)

        if configdrive.required_by(instance):
            self._create_config_drive(instance, injected_files,
                                      admin_password)

        self.power_on(instance)
    except Exception as ex:
        LOG.exception(ex)
        self.destroy(instance)
        raise vmutils.HyperVException(_('Spawn instance failed'))
def spawn(self, context, instance, image_meta, network_info=None,
          block_device_info=None):
    """Create a new VM and start it."""
    vm = self._lookup(instance.name)
    if vm is not None:
        raise exception.InstanceExists(name=instance.name)

    # Fetch the file, assume it is a VHD file.
    base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name)
    vhdfile = "%s.vhd" % (base_vhd_filename)
    images.fetch(instance['image_ref'], vhdfile, instance['user_id'],
                 instance['project_id'])

    try:
        self._create_vm(instance)
        self._create_disk(instance['name'], vhdfile)
        mac_address = None
        if instance['mac_addresses']:
            mac_address = instance['mac_addresses'][0]['address']
        self._create_nic(instance['name'], mac_address)
        LOG.debug(_('Starting VM %s '), instance.name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance.name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info(_LI("Spawning new instance"), instance=instance)

    instance_name = instance['name']
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(instance_name)

    if self._volumeops.ebs_root_in_block_devices(block_device_info):
        root_vhd_path = None
    else:
        root_vhd_path = self._create_root_vhd(context, instance)

    eph_vhd_path = self.create_ephemeral_vhd(instance)
    vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

    try:
        self.create_instance(instance, network_info, block_device_info,
                             root_vhd_path, eph_vhd_path, vm_gen)

        if configdrive.required_by(instance):
            configdrive_path = self._create_config_drive(instance,
                                                         injected_files,
                                                         admin_password)
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.destroy(instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    vm = self._vmutils.lookup(self._conn, instance['name'])
    if vm is not None:
        raise exception.InstanceExists(name=instance['name'])

    ebs_root = self._volumeops.volume_in_mapping(
        self._volumeops.get_default_root_device(), block_device_info)

    # If this is not a boot-from-volume spawn
    if not ebs_root:
        # Fetch the file, assume it is a VHD file.
        vhdfile = self._vmutils.get_vhd_path(instance['name'])
        try:
            self._cache_image(fn=self._vmutils.fetch_image,
                              context=context,
                              target=vhdfile,
                              fname=instance['image_ref'],
                              image_id=instance['image_ref'],
                              user=instance['user_id'],
                              project=instance['project_id'],
                              cow=CONF.use_cow_images)
        except Exception as exn:
            LOG.exception(_('cache image failed: %s'), exn)
            self.destroy(instance)

    try:
        self._create_vm(instance)

        if not ebs_root:
            self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
                                   constants.IDE_DISK)
        else:
            self._volumeops.attach_boot_volume(block_device_info,
                                               instance['name'])

        # A SCSI controller for volume connections is created
        self._create_scsi_controller(instance['name'])

        for vif in network_info:
            self._create_nic(instance['name'], vif)
            self._vif_driver.plug(instance, vif)

        if configdrive.required_by(instance):
            self._create_config_drive(instance, injected_files,
                                      admin_password)

        LOG.debug(_('Starting VM %s '), instance['name'])
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance['name'])
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
        raise exn
def lookup(cls, session, name_label):
    """Look the instance up and return it if available."""
    vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
    n = len(vm_refs)
    if n == 0:
        return None
    elif n > 1:
        raise exception.InstanceExists(name=name_label)
    else:
        return vm_refs[0]
def check_can_live_migrate_destination(
        self, context, instance, src_compute_info, dst_compute_info,
        block_migration=False, disk_over_commit=False):
    try:
        self.client.containers.get(instance.name)
        raise exception.InstanceExists(name=instance.name)
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code != 404:
            raise

    return LXDLiveMigrateData()
def _create_vm(self, instance, vdi_uuid, network_info=None):
    """Create VM instance."""
    instance_name = instance.name
    vm_ref = VMHelper.lookup(self._session, instance_name)
    if vm_ref is not None:
        raise exception.InstanceExists(name=instance_name)

    # Ensure enough free memory is available
    if not VMHelper.ensure_free_mem(self._session, instance):
        LOG.exception(_('instance %(instance_name)s: not enough free '
                        'memory') % locals())
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        return

    user = AuthManager().get_user(instance.user_id)
    project = AuthManager().get_project(instance.project_id)

    # Are we building from a pre-existing disk?
    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    disk_image_type = VMHelper.determine_disk_image_type(instance)

    kernel = None
    if instance.kernel_id:
        kernel = VMHelper.fetch_image(self._session, instance.id,
                                      instance.kernel_id, user, project,
                                      ImageType.KERNEL_RAMDISK)

    ramdisk = None
    if instance.ramdisk_id:
        ramdisk = VMHelper.fetch_image(self._session, instance.id,
                                       instance.ramdisk_id, user, project,
                                       ImageType.KERNEL_RAMDISK)

    use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
                                             vdi_ref, disk_image_type,
                                             instance.os_type)
    vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
                                use_pv_kernel)

    VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                        vdi_ref=vdi_ref, userdevice=0, bootable=True)

    # TODO(tr3buchet) - check to make sure we have network info, otherwise
    # create it now. This goes away once nova-multi-nic hits.
    if network_info is None:
        network_info = self._get_network_info(instance)

    # Alter the image before VM start for, e.g. network injection
    if FLAGS.xenapi_inject_image:
        VMHelper.preconfigure_instance(self._session, instance,
                                       vdi_ref, network_info)

    self.create_vifs(vm_ref, network_info)
    self.inject_network_info(instance, network_info, vm_ref)
    return vm_ref
def spawn(self, context, instance, image_meta, network_info,
          block_device_info=None):
    """Create a new VM and start it."""
    instance_name = instance["name"]
    vm = self._vmutils.lookup(self._conn, instance_name)
    if vm is not None:
        raise exception.InstanceExists(name=instance_name)

    ebs_root = self._volumeops.volume_in_mapping(
        self._volumeops.get_default_root_device(), block_device_info)

    # If this is not a boot-from-volume spawn
    if not ebs_root:
        # Fetch the file, assume it is a VHD file.
        vhdfile = self._vmutils.get_vhd_path(instance_name)
        try:
            self._cache_image(fn=self._vmutils.fetch_image,
                              context=context,
                              target=vhdfile,
                              fname=instance['image_ref'],
                              image_id=instance['image_ref'],
                              user=instance['user_id'],
                              project=instance['project_id'],
                              cow=FLAGS.use_cow_images)
        except Exception as exn:
            LOG.exception(_('cache image failed: %s'), exn)
            self.destroy(instance)

    try:
        self._create_vm(instance)

        if not ebs_root:
            self._create_disk(instance['name'], vhdfile)
        else:
            self._volumeops.attach_boot_volume(block_device_info,
                                               instance_name)

        # A SCSI controller for volume connections is created
        self._create_scsi_controller(instance['name'])

        for vif in network_info:
            mac_address = vif['address'].replace(':', '')
            self._create_nic(instance['name'], mac_address)

        LOG.debug(_('Starting VM %s '), instance_name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance_name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
        raise exn
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info("Spawning new instance", instance=instance)

    instance_name = instance.name
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(instance_name)

    vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

    self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                vm_gen, block_device_info)
    root_device = block_device_info['root_disk']
    self._create_root_device(context, instance, root_device, vm_gen)
    self._create_ephemerals(instance, block_device_info['ephemerals'])

    try:
        with self.wait_vif_plug_events(instance, network_info):
            # waiting will occur after the instance is created.
            self.create_instance(instance, network_info, root_device,
                                 block_device_info, vm_gen, image_meta)
            # This is supported starting from OVS version 2.5
            self.plug_vifs(instance, network_info)

        self._save_device_metadata(context, instance, block_device_info)

        if configdrive.required_by(instance):
            configdrive_path = self._create_config_drive(
                context, instance, injected_files, admin_password,
                network_info)
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.set_boot_order(instance.name, vm_gen, block_device_info)
        # vifs are already plugged in at this point. We waited on the vif
        # plug event previously when we created the instance. Skip the
        # plug vifs during power on in this case
        self.power_on(instance,
                      network_info=network_info,
                      should_plug_vifs=False)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.destroy(instance, network_info, block_device_info)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info(_LI("Spawning new instance"), instance=instance)

    instance_name = instance.name
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(instance_name)

    if ('properties' in image_meta and
            'mtwilson_trustpolicy_location' in image_meta['properties']):
        instance['metadata']['mtwilson_trustpolicy_location'] = \
            image_meta['properties']['mtwilson_trustpolicy_location']
        instance.save()

    if self._volumeops.ebs_root_in_block_devices(block_device_info):
        root_vhd_path = None
    else:
        root_vhd_path = self._create_root_vhd(context, instance)

    eph_vhd_path = self.create_ephemeral_vhd(instance)

    # TODO(lpetrut): move this to the create_instance method.
    vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

    try:
        self.create_instance(instance, network_info, block_device_info,
                             root_vhd_path, eph_vhd_path, vm_gen,
                             image_meta)
        LOG.info(instance)

        if configdrive.required_by(instance):
            configdrive_path = self._create_config_drive(
                instance, injected_files, admin_password, network_info)
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance, network_info=network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.destroy(instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info("Spawning new instance", instance=instance)

    instance_name = instance.name
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(instance_name)

    vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

    self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                vm_gen, block_device_info)
    root_device = block_device_info['root_disk']
    self._create_root_device(context, instance, root_device, vm_gen)
    self._create_ephemerals(instance, block_device_info['ephemerals'])

    try:
        with self.wait_vif_plug_events(instance, network_info):
            # waiting will occur after the instance is created.
            self.create_instance(instance, network_info, root_device,
                                 block_device_info, vm_gen, image_meta)

        self._save_device_metadata(context, instance, block_device_info)

        if configdrive.required_by(instance):
            configdrive_path = self._create_config_drive(
                context, instance, injected_files, admin_password,
                network_info)
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.set_boot_order(instance.name, vm_gen, block_device_info)
        self.power_on(instance, network_info=network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.destroy(instance, network_info, block_device_info)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password=None, network_info=None, block_device_info=None,
          need_vif_plugged=True, rescue=False):
    msg = ('Spawning container '
           'network_info=%(network_info)s '
           'image_meta=%(image_meta)s '
           'instance=%(instance)s '
           'block_device_info=%(block_device_info)s' %
           {'network_info': network_info,
            'instance': instance,
            'image_meta': image_meta,
            'block_device_info': block_device_info})
    LOG.debug(msg, instance=instance)

    if self.container_client.client('defined', instance=instance.name,
                                    host=instance.host):
        raise exception.InstanceExists(name=instance.name)

    start = time.time()

    try:
        self.container_image.setup_image(context, instance, image_meta)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Upload image failed: %(e)s'), {'e': ex})

    try:
        self.create_container(instance, injected_files, network_info,
                              block_device_info, rescue, need_vif_plugged)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Container creation failed: %(e)s'),
                          {'e': ex})

    end = time.time()
    total = end - start
    LOG.debug('Creation took %s seconds to boot.' % total)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, allocations, network_info=None,
          block_device_info=None, power_on=True):
    LOG.info("Spawning new instance %s on zVM hypervisor",
             instance.name, instance=instance)

    if self._hypervisor.guest_exists(instance):
        raise exception.InstanceExists(name=instance.name)

    os_distro = image_meta.properties.get('os_distro')
    if os_distro is None or len(os_distro) == 0:
        reason = _("The `os_distro` image metadata property is required")
        raise exception.InvalidInput(reason=reason)

    try:
        spawn_start = time.time()

        transportfiles = zvmutils.generate_configdrive(
            context, instance, injected_files, network_info,
            admin_password)

        spawn_image_name = self._get_image_info(context, image_meta.id,
                                                os_distro)
        disk_list, eph_list = self._set_disk_list(instance,
                                                  spawn_image_name,
                                                  block_device_info)

        # Create the guest vm
        self._hypervisor.guest_create(instance.name, instance.vcpus,
                                      instance.memory_mb, disk_list)

        # Deploy image to the guest vm
        self._hypervisor.guest_deploy(instance.name, spawn_image_name,
                                      transportfiles=transportfiles)

        # Handle ephemeral disks
        if eph_list:
            self._hypervisor.guest_config_minidisks(instance.name,
                                                    eph_list)
        # Setup network for z/VM instance
        self._wait_vif_plug_events(instance.name, os_distro,
                                   network_info, instance)

        self._hypervisor.guest_start(instance.name)
        spawn_time = time.time() - spawn_start
        LOG.info("Instance spawned successfully in %s seconds",
                 spawn_time, instance=instance)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            LOG.error("Deploy instance %(instance)s "
                      "failed with reason: %(err)s",
                      {'instance': instance.name, 'err': err},
                      instance=instance)
            try:
                self.destroy(context, instance, network_info,
                             block_device_info)
            except Exception:
                LOG.exception("Failed to destroy instance",
                              instance=instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create a new lxd container as a nova instance.

    Creating a new container requires a number of steps. First, the
    image is fetched from glance, if needed. Next, the network is
    connected. A profile is created in LXD, and then the container is
    created and started.

    See `nova.virt.driver.ComputeDriver.spawn` for more information.
    """
    try:
        self.client.containers.get(instance.name)
        raise exception.InstanceExists(name=instance.name)
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code != 404:
            raise  # Re-raise the exception if it wasn't NotFound

    instance_dir = common.InstanceAttributes(instance).instance_dir
    if not os.path.exists(instance_dir):
        fileutils.ensure_tree(instance_dir)

    # Check to see if LXD already has a copy of the image. If not,
    # fetch it.
    try:
        self.client.images.get_by_alias(instance.image_ref)
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code != 404:
            raise
        _sync_glance_image_to_lxd(
            self.client, context, instance.image_ref)

    # Plug in the network
    if network_info:
        timeout = CONF.vif_plugging_timeout
        if (utils.is_neutron() and timeout):
            events = [('network-vif-plugged', vif['id'])
                      for vif in network_info
                      if not vif.get('active', True)]
        else:
            events = []

        try:
            with self.virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=_neutron_failed_callback):
                self.plug_vifs(instance, network_info)
        except eventlet.timeout.Timeout:
            LOG.warn('Timeout waiting for vif plugging callback for '
                     'instance %(uuid)s', {'uuid': instance['name']})
            if CONF.vif_plugging_is_fatal:
                self.destroy(
                    context, instance, network_info, block_device_info)
                raise exception.InstanceDeployFailure(
                    'Timeout waiting for vif plugging',
                    instance_id=instance['name'])

    # Create the profile
    try:
        profile = flavor.to_profile(
            self.client, instance, network_info, block_device_info)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)

    # Create the container
    container_config = {
        'name': instance.name,
        'profiles': [profile.name],
        'source': {
            'type': 'image',
            'alias': instance.image_ref,
        },
    }
    try:
        container = self.client.containers.create(
            container_config, wait=True)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)

    lxd_config = self.client.host_info
    storage.attach_ephemeral(
        self.client, block_device_info, lxd_config, instance)

    if configdrive.required_by(instance):
        configdrive_path = self._add_configdrive(
            context, instance, injected_files, admin_password,
            network_info)

        profile = self.client.profiles.get(instance.name)
        config_drive = {
            'configdrive': {
                'path': '/config-drive',
                'source': configdrive_path,
                'type': 'disk',
                'readonly': 'True',
            }
        }
        profile.devices.update(config_drive)
        profile.save()

    try:
        self.firewall_driver.setup_basic_filtering(
            instance, network_info)
        self.firewall_driver.instance_filter(
            instance, network_info)

        container.start(wait=True)

        self.firewall_driver.apply_instance_filter(
            instance, network_info)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password=None, network_info=None, block_device_info=None):
    """Start the LXD container.

    Once this successfully completes, the instance should be
    running (power_state.RUNNING).

    If this fails, any partial instance should be completely
    cleaned up, and the virtualization platform should be in the state
    that it was before this call began.

    :param context: security context
    :param instance: nova.objects.instance.Instance
                     This function should use the data there to guide
                     the creation of the new instance.
    :param image_meta: image object returned by nova.image.glance
                       that defines the image from which to boot this
                       instance
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in instance.
    :param network_info:
        :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
    :param block_device_info: Information about block devices to be
                              attached to the instance
    """
    msg = ('Spawning container '
           'network_info=%(network_info)s '
           'image_meta=%(image_meta)s '
           'instance=%(instance)s '
           'block_device_info=%(block_device_info)s' %
           {'network_info': network_info,
            'instance': instance,
            'image_meta': image_meta,
            'block_device_info': block_device_info})
    LOG.debug(msg, instance=instance)

    instance_name = instance.name
    if self.session.container_defined(instance_name, instance):
        raise exception.InstanceExists(name=instance.name)

    try:
        # Ensure that the instance directory exists
        self.instance_dir = \
            self.container_dir.get_instance_dir(instance_name)
        if not os.path.exists(self.instance_dir):
            fileutils.ensure_tree(self.instance_dir)

        # Step 1 - Fetch the image from glance
        self._fetch_image(context, instance, image_meta)

        # Step 2 - Setup the container network
        self._setup_network(instance_name, instance, network_info)

        # Step 3 - Create the container profile
        self._setup_profile(instance_name, instance, network_info)

        # Step 4 - Create a config drive (optional)
        if configdrive.required_by(instance):
            self._add_configdrive(instance, injected_files)

        # Step 5 - Configure and start the container
        self._setup_container(instance_name, instance)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to start container '
                          '%(instance)s: %(ex)s'),
                      {'instance': instance.name, 'ex': ex},
                      instance=instance)
            self.destroy(context, instance, network_info)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    instance_name = instance['name']
    if self._vmutils.vm_exists(instance_name):
        raise exception.InstanceExists(name=instance_name)

    ebs_root = self._volumeops.volume_in_mapping(
        self._volumeops.get_default_root_device(), block_device_info)

    # If this is not a boot-from-volume spawn
    if not ebs_root:
        # Fetch the file, assume it is a VHD file.
        vhdfile = self._pathutils.get_vhd_path(instance_name)
        try:
            self._cache_image(fn=self._fetch_image,
                              context=context,
                              target=vhdfile,
                              fname=instance['image_ref'],
                              image_id=instance['image_ref'],
                              user=instance['user_id'],
                              project=instance['project_id'],
                              cow=CONF.use_cow_images)
        except Exception as exn:
            LOG.exception(_('cache image failed: %s'), exn)
            raise

    try:
        self._vmutils.create_vm(instance_name,
                                instance['memory_mb'],
                                instance['vcpus'],
                                CONF.limit_cpu_features)

        if not ebs_root:
            self._vmutils.attach_ide_drive(instance_name, vhdfile, 0, 0,
                                           constants.IDE_DISK)
        else:
            self._volumeops.attach_boot_volume(block_device_info,
                                               instance_name)

        self._vmutils.create_scsi_controller(instance_name)

        for vif in network_info:
            LOG.debug(_('Creating nic for instance: %s'), instance_name)
            self._vmutils.create_nic(instance_name, vif['id'],
                                     vif['address'])
            self._vif_driver.plug(instance, vif)

        if configdrive.required_by(instance):
            self._create_config_drive(instance, injected_files,
                                      admin_password)

        self._set_vm_state(instance_name,
                           constants.HYPERV_VM_STATE_ENABLED)
    except Exception as ex:
        LOG.exception(ex)
        self.destroy(instance)
        raise vmutils.HyperVException(_('Spawn instance failed'))
def spawn(self, context, instance, image_meta, network_info):
    """Creates a VM instance.

    Steps followed are:
    1. Create a VM with no disk and the specifics in the instance object
       like RAM size.
    2. Create a dummy vmdk of the size of the disk file that is to be
       uploaded. This is required just to create the metadata file.
    3. Delete the -flat.vmdk file created in the above step and retain
       the metadata .vmdk file.
    4. Upload the disk file.
    5. Attach the disk to the VM by reconfiguring the same.
    6. Power on the VM.
    """
    vm_ref = self._get_vm_ref_from_the_name(instance.name)
    if vm_ref:
        raise exception.InstanceExists(name=instance.name)

    client_factory = self._session._get_vim().client.factory
    service_content = self._session._get_vim().get_service_content()

    def _get_datastore_ref():
        """Get the datastore list and choose the first local storage."""
        data_stores = self._session._call_method(
            vim_util, "get_objects", "Datastore",
            ["summary.type", "summary.name"])
        for elem in data_stores:
            ds_name = None
            ds_type = None
            for prop in elem.propSet:
                if prop.name == "summary.type":
                    ds_type = prop.val
                elif prop.name == "summary.name":
                    ds_name = prop.val
            # Local storage identifier
            if ds_type == "VMFS":
                data_store_name = ds_name
                return data_store_name

        if data_store_name is None:
            msg = _("Couldn't get a local Datastore reference")
            LOG.exception(msg)
            raise exception.Error(msg)

    data_store_name = _get_datastore_ref()

    def _get_image_properties():
        """Get the size of the flat vmdk file that is there on the
        storage repository.
        """
        image_size, image_properties = \
            vmware_images.get_vmdk_size_and_properties(
                context, instance.image_ref, instance)
        vmdk_file_size_in_kb = int(image_size) / 1024
        os_type = image_properties.get("vmware_ostype", "otherGuest")
        adapter_type = image_properties.get("vmware_adaptertype",
                                            "lsiLogic")
        return vmdk_file_size_in_kb, os_type, adapter_type

    vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()

    def _get_vmfolder_and_res_pool_mors():
        """Get the Vm folder ref from the datacenter."""
        dc_objs = self._session._call_method(vim_util, "get_objects",
                                             "Datacenter", ["vmFolder"])
        # There is only one default datacenter in a standalone ESX host
        vm_folder_mor = dc_objs[0].propSet[0].val

        # Get the resource pool. Taking the first resource pool coming
        # our way. Assuming that is the default resource pool.
        res_pool_mor = self._session._call_method(
            vim_util, "get_objects", "ResourcePool")[0].obj
        return vm_folder_mor, res_pool_mor

    vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()

    def _check_if_network_bridge_exists(network_name):
        network_ref = network_utils.get_network_with_the_name(
            self._session, network_name)
        if network_ref is None:
            raise exception.NetworkNotFoundForBridge(bridge=network_name)
        return network_ref

    def _get_vif_infos():
        vif_infos = []
        for (network, mapping) in network_info:
            mac_address = mapping['mac']
            network_name = network['bridge']
            if mapping.get('should_create_vlan'):
                network_ref = self._vif_driver.ensure_vlan_bridge(
                    self._session, network)
            else:
                network_ref = _check_if_network_bridge_exists(network_name)
            vif_infos.append({'network_name': network_name,
                              'mac_address': mac_address,
                              'network_ref': network_ref})
        return vif_infos

    vif_infos = _get_vif_infos()

    # Get the create vm config spec
    config_spec = vm_util.get_vm_create_spec(client_factory, instance,
                                             data_store_name, vif_infos,
                                             os_type)

    def _execute_create_vm():
        """Create VM on ESX host."""
        LOG.debug(_("Creating VM with the name %s on the ESX host") %
                  instance.name)
        # Create the VM on the ESX host
        vm_create_task = self._session._call_method(
            self._session._get_vim(),
            "CreateVM_Task", vm_folder_mor,
            config=config_spec, pool=res_pool_mor)
        self._session._wait_for_task(instance.id, vm_create_task)
        LOG.debug(_("Created VM with the name %s on the ESX host") %
                  instance.name)

    _execute_create_vm()

    # Set the machine.id parameter of the instance to inject
    # the NIC configuration inside the VM
    if FLAGS.flat_injected:
        self._set_machine_id(client_factory, instance, network_info)

    # Naming the VM files in correspondence with the VM instance name
    # The flat vmdk file name
    flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
                                                   instance.name)
    # The vmdk meta-data file
    uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
    flat_uploaded_vmdk_path = vm_util.build_datastore_path(
        data_store_name, flat_uploaded_vmdk_name)
    uploaded_vmdk_path = vm_util.build_datastore_path(
        data_store_name, uploaded_vmdk_name)

    def _create_virtual_disk():
        """Create a virtual disk of the size of flat vmdk file."""
        # Create a Virtual Disk of the size of the flat vmdk file. This
        # is done just to generate the meta-data file whose specifics
        # depend on the size of the disk, thin/thick provisioning and
        # the storage adapter type.
        # Here we assume thick provisioning and lsiLogic for the adapter
        # type
        LOG.debug(_("Creating Virtual Disk of size "
                    "%(vmdk_file_size_in_kb)s KB and adapter type "
                    "%(adapter_type)s on the ESX host local store "
                    "%(data_store_name)s") %
                  {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                   "adapter_type": adapter_type,
                   "data_store_name": data_store_name})
        vmdk_create_spec = vm_util.get_vmdk_create_spec(
            client_factory, vmdk_file_size_in_kb, adapter_type)
        vmdk_create_task = self._session._call_method(
            self._session._get_vim(),
            "CreateVirtualDisk_Task",
            service_content.virtualDiskManager,
            name=uploaded_vmdk_path,
            datacenter=self._get_datacenter_name_and_ref()[0],
            spec=vmdk_create_spec)
        self._session._wait_for_task(instance.id, vmdk_create_task)
        LOG.debug(_("Created Virtual Disk of size "
                    "%(vmdk_file_size_in_kb)s KB on the ESX host local "
                    "store %(data_store_name)s") %
                  {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                   "data_store_name": data_store_name})

    _create_virtual_disk()

    def _delete_disk_file():
        LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
                    "on the ESX host local store %(data_store_name)s") %
                  {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                   "data_store_name": data_store_name})
        # Delete the -flat.vmdk file created. .vmdk file is retained.
        vmdk_delete_task = self._session._call_method(
            self._session._get_vim(),
            "DeleteDatastoreFile_Task",
            service_content.fileManager,
            name=flat_uploaded_vmdk_path)
        self._session._wait_for_task(instance.id, vmdk_delete_task)
        LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
                    "ESX host local store %(data_store_name)s") %
                  {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                   "data_store_name": data_store_name})

    _delete_disk_file()

    cookies = self._session._get_vim().client.options.transport.cookiejar

    def _fetch_image_on_esx_datastore():
        """Fetch image from Glance to ESX datastore."""
        LOG.debug(_("Downloading image file data %(image_ref)s to the "
                    "ESX data store %(data_store_name)s") %
                  {'image_ref': instance.image_ref,
                   'data_store_name': data_store_name})
        # Upload the -flat.vmdk file whose meta-data file we just
        # created above
        vmware_images.fetch_image(
            context, instance.image_ref, instance,
            host=self._session._host_ip,
            data_center_name=self._get_datacenter_name_and_ref()[1],
            datastore_name=data_store_name,
            cookies=cookies,
            file_path=flat_uploaded_vmdk_name)
        LOG.debug(_("Downloaded image file data %(image_ref)s to the "
                    "ESX data store %(data_store_name)s") %
                  {'image_ref': instance.image_ref,
                   'data_store_name': data_store_name})

    _fetch_image_on_esx_datastore()

    vm_ref = self._get_vm_ref_from_the_name(instance.name)

    def _attach_vmdk_to_the_vm():
        """Attach the uploaded vmdk to the VM. This is done by
        reconfiguring the VM.
        """
        vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
            client_factory, vmdk_file_size_in_kb, uploaded_vmdk_path,
            adapter_type)
        LOG.debug(_("Reconfiguring VM instance %s to attach the image "
                    "disk") % instance.name)
        reconfig_task = self._session._call_method(
            self._session._get_vim(),
            "ReconfigVM_Task", vm_ref,
            spec=vmdk_attach_config_spec)
        self._session._wait_for_task(instance.id, reconfig_task)
        LOG.debug(_("Reconfigured VM instance %s to attach the image "
                    "disk") % instance.name)

    _attach_vmdk_to_the_vm()

    def _power_on_vm():
        """Power on the VM."""
        LOG.debug(_("Powering on the VM instance %s") % instance.name)
        # Power On the VM
        power_on_task = self._session._call_method(
            self._session._get_vim(),
            "PowerOnVM_Task", vm_ref)
        self._session._wait_for_task(instance.id, power_on_task)
        LOG.debug(_("Powered on the VM instance %s") % instance.name)

    _power_on_vm()