def test_create_and_connect_cfg_drive(self):
    """Exercise CreateAndConnectCfgDrive's execute and revert paths.

    Covers: normal execute with a specified FeedTask, normal revert,
    revert when vopt deletion raises, and revert when the media builder
    was never created.
    """
    revert_args = ('mgmt_cna', 'result', 'flow_failures')

    # Execute with a specified FeedTask.
    cfg_task = tf_stg.CreateAndConnectCfgDrive(
        self.adapter, self.instance, 'injected_files', 'network_info',
        'stg_ftsk', admin_pass='******')
    cfg_task.execute('mgmt_cna')
    self.mock_cfg_drv.assert_called_once_with(self.adapter)
    self.mock_mb.create_cfg_drv_vopt.assert_called_once_with(
        self.instance, 'injected_files', 'network_info', 'stg_ftsk',
        admin_pass='******', mgmt_cna='mgmt_cna')

    # Normal revert deletes the virtual optical media.
    cfg_task.revert(*revert_args)
    self.mock_mb.dlt_vopt.assert_called_once_with(self.instance,
                                                  'stg_ftsk')
    self.mock_mb.reset_mock()

    # Revert tolerates dlt_vopt raising (failure is swallowed).
    self.mock_mb.dlt_vopt.side_effect = pvm_exc.Error('fake-exc')
    cfg_task.revert(*revert_args)
    self.mock_mb.dlt_vopt.assert_called_once()
    self.mock_mb.reset_mock()

    # Revert is a no-op when the media builder was never created.
    cfg_task.mb = None
    cfg_task.revert(*revert_args)
    self.mock_mb.assert_not_called()
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, allocations, network_info=None,
          block_device_info=None):
    """Create a new instance/VM/domain on the virtualization platform.

    Once this successfully completes, the instance should be running
    (power_state.RUNNING). If this fails, any partial instance should be
    completely cleaned up, and the virtualization platform should be in
    the state that it was before this call began.

    :param context: security context
    :param instance: nova.objects.instance.Instance This function
        should use the data there to guide the creation of the new
        instance.
    :param nova.objects.ImageMeta image_meta: The metadata of the image
        of the instance.
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in instance.
    :param allocations: Information about resources allocated to the
        instance via placement, of the form returned by
        SchedulerReportClient.get_allocations_for_consumer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices to be
        attached to the instance.
    """
    self._log_operation('spawn', instance)

    # The spawn is modeled as a linear TaskFlow; tasks are added in the
    # order they must run.
    spawn_flow = tf_lf.Flow("spawn")

    # FeedTask that accumulates VIOS storage connection operations so
    # they can be run in parallel later. Both SCSI and fibre channel
    # mappings are included for the scrubber.
    stg_ftsk = pvm_par.build_active_vio_feed_task(
        self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

    # First, create the LPAR itself.
    spawn_flow.add(
        tf_vm.Create(self.adapter, self.host_wrapper, instance, stg_ftsk))

    # Next, the networking I/O.
    spawn_flow.add(
        tf_net.PlugVifs(self.virtapi, self.adapter, instance, network_info))
    spawn_flow.add(tf_net.PlugMgmtVif(self.adapter, instance))

    # Then the boot disk: build it from the image and connect it up to
    # the LPAR.
    spawn_flow.add(
        tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
    spawn_flow.add(
        tf_stg.AttachDisk(self.disk_dvr, instance, stg_ftsk=stg_ftsk))

    # If a config drive is needed, add those steps after all the other
    # I/O.
    if configdrive.required_by(instance):
        spawn_flow.add(
            tf_stg.CreateAndConnectCfgDrive(
                self.adapter, instance, injected_files, network_info,
                stg_ftsk, admin_pass=admin_password))

    # The transaction manager flow goes at the end of the 'I/O
    # connection' tasks; it runs all the accumulated connections in
    # parallel.
    spawn_flow.add(stg_ftsk)

    # Last step: power on the system.
    spawn_flow.add(tf_vm.PowerOn(self.adapter, instance))

    # Run the flow.
    tf_base.run(spawn_flow, instance=instance)