def connect_disk(self, context, instance, disk_info, stg_ftsk=None):
    """Connects the disk image to the Virtual Machine.

    :param context: nova context for the transaction.
    :param instance: nova instance to connect the disk to.
    :param disk_info: The pypowervm storage element returned from
                      create_disk_from_image.  Ex. VOptMedia, VDisk, LU,
                      or PV.
    :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for
                     the I/O Operations.  If provided, the Virtual I/O
                     Server mapping updates will be added to the
                     FeedTask.  This defers the updates to some later
                     point in time.  If the FeedTask is not provided, the
                     updates will be run immediately when this method is
                     executed.
    """
    lpar_uuid = vm.get_pvm_uuid(instance)

    # Ensure we have a transaction manager.
    if stg_ftsk is None:
        stg_ftsk = pvm_tpar.build_active_vio_feed_task(
            self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])

    def add_func(vios_w):
        LOG.info(_LI("Adding logical volume disk connection between VM "
                     "%(vm)s and VIOS %(vios)s."),
                 {'vm': instance.name, 'vios': vios_w.name})
        mapping = tsk_map.build_vscsi_mapping(
            self.host_uuid, vios_w, lpar_uuid, disk_info)
        return tsk_map.add_map(vios_w, mapping)

    stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func)

    # Run the transaction manager if it was built locally.
    if stg_ftsk.name == 'localdisk':
        stg_ftsk.execute()
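
# Usage sketch (hypothetical caller, not from the source): passing a shared
# FeedTask defers the VIOS mapping update so several storage operations can
# be flushed in one parallel pass; with no FeedTask, connect_disk runs the
# update immediately.  Note the method above only auto-executes a FeedTask
# it built itself (name == 'localdisk'), so a shared one must be executed
# by its owner.
def _example_deferred_connect(disk_dvr, context, instance, disk_info):
    stg_ftsk = pvm_tpar.build_active_vio_feed_task(
        disk_dvr.adapter, name='my_io_batch',
        xag=[pvm_const.XAG.VIO_SMAP])
    disk_dvr.connect_disk(context, instance, disk_info, stg_ftsk=stg_ftsk)
    # ...more mapping subtasks could be queued on stg_ftsk here...
    stg_ftsk.execute()  # one parallel mapping update per VIOS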
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create a new instance/VM/domain on the virtualization platform.

    Once this successfully completes, the instance should be
    running (power_state.RUNNING).

    If this fails, any partial instance should be completely
    cleaned up, and the virtualization platform should be in the state
    that it was before this call began.

    :param context: security context
    :param instance: nova.objects.instance.Instance
                     This function should use the data there to guide
                     the creation of the new instance.
    :param nova.objects.ImageMeta image_meta: The metadata of the image
                                              of the instance.
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in instance.
    :param network_info: instance network information
    :param block_device_info: Information about block devices to be
                              attached to the instance.
    """
    self._log_operation('spawn', instance)

    # Define the flow.
    flow_spawn = tf_lf.Flow("spawn")

    # This FeedTask accumulates VIOS storage connection operations to be
    # run in parallel.  Include both SCSI and fibre channel mappings for
    # the scrubber.
    stg_ftsk = pvm_par.build_active_vio_feed_task(
        self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

    flow_spawn.add(tf_vm.Create(
        self.adapter, self.host_wrapper, instance, stg_ftsk))

    # TODO(thorst, efried) Plug the VIFs

    # Create the boot image.
    flow_spawn.add(tf_stg.CreateDiskForImg(
        self.disk_dvr, context, instance, image_meta))

    # Connect the disk to the LPAR.
    flow_spawn.add(tf_stg.AttachDisk(
        self.disk_dvr, instance, stg_ftsk=stg_ftsk))

    # TODO(thorst, efried) Add the config drive

    # Add the transaction manager flow at the end of the 'I/O
    # connection' tasks.  This will run all the connections in parallel.
    flow_spawn.add(stg_ftsk)

    # Last step is to power on the system.
    flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

    # Run the flow.
    tf_base.run(flow_spawn, instance=instance)
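
# Why taskflow: each step added to the flow is a Task whose revert() undoes
# its partial work if a later task fails, which is how spawn can honor the
# "completely cleaned up" contract in the docstring above.  The class below
# is an illustrative sketch, not the real tf_vm.PowerOn.
from taskflow import task


class ExamplePowerOn(task.Task):
    """Illustrative task; names and bodies here are assumptions."""

    def __init__(self, adapter, instance):
        super(ExamplePowerOn, self).__init__(name='example_power_on')
        self.adapter = adapter
        self.instance = instance

    def execute(self):
        # Forward path: power the partition on (real code would invoke a
        # pypowervm power task here).
        pass

    def revert(self, result, flow_failures):
        # A downstream task failed: power the partition back off so the
        # host is returned to its pre-spawn state.
        pass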
def _setup_flow_and_run():
    # Define the flow.
    flow = tf_lf.Flow("destroy")

    # Power off the LPAR.  If its disks are about to be deleted, issue a
    # hard shutdown.
    flow.add(tf_vm.PowerOff(self.adapter, instance,
                            force_immediate=destroy_disks))

    # The FeedTask accumulates storage disconnection tasks to be run in
    # parallel.
    stg_ftsk = pvm_par.build_active_vio_feed_task(
        self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

    # Call the unplug VIFs task.  While CNAs get removed from the LPAR
    # directly on the destroy, this clears up the I/O Host side.
    flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

    # Add the disconnection/deletion of the vOpt to the transaction
    # manager.
    if configdrive.required_by(instance):
        flow.add(tf_stg.DeleteVOpt(
            self.adapter, instance, stg_ftsk=stg_ftsk))

    # Extract the block devices.
    bdms = driver.block_device_info_get_mapping(block_device_info)

    # Determine if there are volumes to detach.  If so, remove each
    # volume (within the transaction manager).
    for bdm, vol_drv in self._vol_drv_iter(
            context, instance, bdms, stg_ftsk=stg_ftsk):
        flow.add(tf_stg.DetachVolume(vol_drv))

    # Detach the disk storage adapters.
    flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

    # Accumulated storage disconnection tasks run next.
    flow.add(stg_ftsk)

    # Delete the storage disks.
    if destroy_disks:
        flow.add(tf_stg.DeleteDisk(self.disk_dvr))

    # TODO(thorst, efried) Add LPAR id based scsi map clean up task
    flow.add(tf_vm.Delete(self.adapter, instance))

    # Build the engine & run!
    tf_base.run(flow, instance=instance)
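
# Context sketch (an assumption, not taken from the source): the function
# above reads like a closure inside the driver's destroy() method, which
# would typically wrap it so a racing delete is tolerated.  The wrapper and
# exception alias below are illustrative only.
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True):
    """Illustrative outer method; only the call pattern is the point."""
    self._log_operation('destroy', instance)

    def _setup_flow_and_run():
        ...  # the flow construction shown above

    try:
        _setup_flow_and_run()
    except exception.InstanceNotFound:
        # Destroy is effectively idempotent if the VM is already gone.
        LOG.debug('VM was not found during destroy operation.',
                  instance=instance)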
def disconnect_image_disk(self, context, instance, stg_ftsk=None,
                          disk_type=None):
    """Disconnects the storage adapters from the image disk.

    :param context: nova context for operation
    :param instance: instance to disconnect the image for.
    :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for
                     the I/O Operations.  If provided, the Virtual I/O
                     Server mapping updates will be added to the
                     FeedTask.  This defers the updates to some later
                     point in time.  If the FeedTask is not provided, the
                     updates will be run immediately when this method is
                     executed.
    :param disk_type: The list of disk types to remove, or None to remove
                      all disks from the VM.
    :return: A list of all the backing storage elements that were
             disconnected from the I/O Server and VM.
    """
    lpar_uuid = vm.get_pvm_uuid(instance)

    # Ensure we have a transaction manager.
    if stg_ftsk is None:
        stg_ftsk = pvm_tpar.build_active_vio_feed_task(
            self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])

    # Build the match function.
    match_func = tsk_map.gen_match_func(pvm_stg.VDisk, prefixes=disk_type)

    # The remove function runs within the transaction manager.
    def rm_func(vios_w):
        LOG.info(_LI("Disconnecting instance %(inst)s from storage "
                     "disks."), {'inst': instance.name})
        return tsk_map.remove_maps(vios_w, lpar_uuid,
                                   match_func=match_func)

    stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(rm_func)

    # Find the disks directly.
    vios_w = stg_ftsk.wrapper_tasks[self._vios_uuid].wrapper
    mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                 client_lpar_id=lpar_uuid,
                                 match_func=match_func)

    # Run the transaction manager if it was built locally.  This must
    # happen after the find, so the mappings are captured before they are
    # removed.
    if stg_ftsk.name == 'localdisk':
        stg_ftsk.execute()

    return [x.backing_storage for x in mappings]
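
# Usage sketch (hypothetical): the return value is the list of backing
# storage elements, which is exactly what a subsequent delete needs.  The
# delete_disks call and its signature are assumptions, not shown in the
# source.
def _example_disconnect_and_delete(disk_dvr, context, instance):
    stor_elems = disk_dvr.disconnect_image_disk(context, instance)
    # Deleting the backing virtual disks is only safe once the mappings
    # have been removed.
    disk_dvr.delete_disks(context, instance, stor_elems)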
def detach_disk(self, instance):
    """Detaches the storage adapters from the disk.

    :param instance: instance from which to detach the image.
    :return: A list of all the backing storage elements that were
             detached from the I/O Server and VM.
    """
    stg_ftsk = tsk_par.build_active_vio_feed_task(
        self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

    lpar_uuid = vm.get_pvm_uuid(instance)
    match_func = tsk_map.gen_match_func(pvm_stg.LU)

    def rm_func(vwrap):
        LOG.info("Removing SSP disk connection to VIOS %s.",
                 vwrap.name, instance=instance)
        return tsk_map.remove_maps(vwrap, lpar_uuid,
                                   match_func=match_func)

    # Remove the mapping from *each* VIOS on the LPAR's host.
    # The LPAR's host has to be self.host_uuid, else the PowerVM API will
    # fail.
    #
    # Note: this may not be all the VIOSes on the system...just the ones
    # in the SSP cluster.
    #
    # The mappings will normally be the same on all VIOSes, unless a VIOS
    # was down when a disk was added.  So for the return value, we need
    # to collect the union of all relevant mappings from all VIOSes.
    lu_set = set()
    for vios_uuid in self._vios_uuids:
        # Add the remove for this VIOS.
        stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

        # Find the active LUs so that a delete op knows what to remove.
        vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=lpar_uuid,
                                     match_func=match_func)
        if mappings:
            lu_set.update([x.backing_storage for x in mappings])

    stg_ftsk.execute()

    return list(lu_set)
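
# Follow-up sketch: the LU list returned by detach_disk is the union across
# all SSP-cluster VIOSes, so a delete based on it also covers disks whose
# mappings diverged (e.g. a VIOS was down during attach).  The
# rm_ssp_storage helper and the _ssp attribute are assumptions about the
# surrounding driver, not shown in the source.
def _example_detach_and_delete(disk_dvr, instance):
    import pypowervm.tasks.storage as tsk_stg
    lus = disk_dvr.detach_disk(instance)
    # Remove the now-unmapped LUs from the shared storage pool.
    tsk_stg.rm_ssp_storage(disk_dvr._ssp, lus)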
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, allocations, network_info=None,
          block_device_info=None):
    """Create a new instance/VM/domain on the virtualization platform.

    Once this successfully completes, the instance should be
    running (power_state.RUNNING).

    If this fails, any partial instance should be completely
    cleaned up, and the virtualization platform should be in the state
    that it was before this call began.

    :param context: security context
    :param instance: nova.objects.instance.Instance
                     This function should use the data there to guide
                     the creation of the new instance.
    :param nova.objects.ImageMeta image_meta: The metadata of the image
                                              of the instance.
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in instance.
    :param allocations: Information about resources allocated to the
                        instance via placement, of the form returned by
                        SchedulerReportClient.get_allocations_for_consumer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices to be
                              attached to the instance.
    """
    self._log_operation('spawn', instance)

    # Define the flow.
    flow_spawn = tf_lf.Flow("spawn")

    # This FeedTask accumulates VIOS storage connection operations to be
    # run in parallel.  Include both SCSI and fibre channel mappings for
    # the scrubber.
    stg_ftsk = pvm_par.build_active_vio_feed_task(
        self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

    flow_spawn.add(tf_vm.Create(
        self.adapter, self.host_wrapper, instance, stg_ftsk))

    # Create a flow for the I/O.
    flow_spawn.add(tf_net.PlugVifs(
        self.virtapi, self.adapter, instance, network_info))
    flow_spawn.add(tf_net.PlugMgmtVif(self.adapter, instance))

    # Create the boot image.
    flow_spawn.add(tf_stg.CreateDiskForImg(
        self.disk_dvr, context, instance, image_meta))

    # Connect the disk to the LPAR.
    flow_spawn.add(tf_stg.AttachDisk(
        self.disk_dvr, instance, stg_ftsk=stg_ftsk))

    # Extract the block devices.
    bdms = driver.block_device_info_get_mapping(block_device_info)

    # Determine if there are volumes to connect.  If so, add a connection
    # for each type.
    for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
                                           stg_ftsk=stg_ftsk):
        # Connect the volume.  This will update the connection_info.
        flow_spawn.add(tf_stg.AttachVolume(vol_drv))

    # If the config drive is needed, add those steps.  This should be
    # done after all the other I/O.
    if configdrive.required_by(instance):
        flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
            self.adapter, instance, injected_files, network_info,
            stg_ftsk, admin_pass=admin_password))

    # Add the transaction manager flow at the end of the 'I/O
    # connection' tasks.  This will run all the connections in parallel.
    flow_spawn.add(stg_ftsk)

    # Last step is to power on the system.
    flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

    # Run the flow.
    tf_base.run(flow_spawn, instance=instance)