def add_disk(api):
    """Attach the Glance-imported disk (if present) to VM0 as an active boot
    disk, create a thin cow disk on the 'nfs' domain for VM1, and wait for
    both disks to reach the 'ok' state."""
    glance_disk = api.disks.get(GLANCE_DISK_NAME)
    if glance_disk:
        # The Glance disk already exists; only attach it to VM0.
        nt.assert_true(
            api.vms.get(VM0_NAME).disks.add(
                params.Disk(
                    id=glance_disk.get_id(),
                    active=True,
                    bootable=True,
                )
            )
        )
    new_disk = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[
                params.StorageDomain(name='nfs', ),
            ],
        ),
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    nt.assert_true(api.vms.get(VM1_NAME).disks.add(new_disk))
    if glance_disk:
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok'
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM1_NAME).disks.get(
            DISK1_NAME).status.state == 'ok'
    )
def snapshot_merge(api):
    """Create two snapshots of VM0's DISK0, delete the older one, and verify
    the merge leaves exactly two snapshots with the newest in 'ok' state."""

    def take_snapshot(description):
        # Snapshot only DISK0, without persisting memory state, then wait
        # until the newest snapshot reports 'ok'.
        snap = params.Snapshot(
            description=description,
            persist_memorystate=False,
            disks=params.Disks(disk=[
                params.Disk(id=api.vms.get(VM0_NAME).disks.get(DISK0_NAME).id, ),
            ], ),
        )
        api.vms.get(VM0_NAME).snapshots.add(snap)
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).snapshots.list()[-1]
            .snapshot_status == 'ok'
        )

    take_snapshot('dead_snap1')
    take_snapshot('dead_snap2')
    # Delete the second-to-last snapshot and wait for the merge to finish.
    api.vms.get(VM0_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_short(
        lambda: (len(api.vms.get(VM0_NAME).snapshots.list()) == 2) and
                (api.vms.get(VM0_NAME).snapshots.list()[-1]
                 .snapshot_status == 'ok'),
    )
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): if vmdisk_alloc == 'thin': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') elif vmdisk_alloc == 'preallocated': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') try: conn.vms.add(vmparams) except: print "Error creating VM with specified parameters" sys.exit(1) vm = conn.vms.get(name=vmname) try: vm.disks.add(vmdisk) except: print "Error attaching disk" try: vm.nics.add(nic_net1) except: print "Error adding nic"
def snapshot_cold_merge(api):
    """Create two snapshots of VM1's DISK1, delete the older one, and verify
    the cold merge leaves exactly two snapshots in 'ok' state."""
    if api.vms.get(VM1_NAME) is None:
        raise SkipTest('Glance is not available')

    def take_snapshot(description):
        # Snapshot only DISK1, without persisting memory state.
        api.vms.get(VM1_NAME).snapshots.add(params.Snapshot(
            description=description,
            persist_memorystate=False,
            disks=params.Disks(disk=[
                params.Disk(id=api.vms.get(VM1_NAME).disks.get(DISK1_NAME).id, ),
            ], ),
        ))
        testlib.assert_true_within_long(
            lambda: api.vms.get(VM1_NAME).snapshots.list()[-1]
            .snapshot_status == 'ok'
        )

    take_snapshot('dead_snap1')
    take_snapshot('dead_snap2')
    api.vms.get(VM1_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_long(
        lambda: (len(api.vms.get(VM1_NAME).snapshots.list()) == 2) and
                (api.vms.get(VM1_NAME).snapshots.list()[-1]
                 .snapshot_status == 'ok'),
    )
def snapshot_live_merge(api):
    """Take two live snapshots of VM1's first disk, delete the older one,
    and verify the live merge completes while the VM stays up."""
    first_disk = api.vms.get(VM1_NAME).disks.list()[0]
    disk_id = first_disk.id
    disk_name = first_disk.name

    def snap_params(description):
        # Live snapshot of a single disk, persisting the memory state.
        return params.Snapshot(
            description=description,
            persist_memorystate=True,
            disks=params.Disks(disk=[params.Disk(id=disk_id, ), ], ),
        )

    api.vms.get(VM1_NAME).snapshots.add(snap_params('live_snap1'))
    testlib.assert_true_within(
        func=(lambda: api.vms.get(VM1_NAME).snapshots.list()[-1].
              snapshot_status == 'ok'),
        timeout=SHORT_TIMEOUT,
    )
    api.vms.get(VM1_NAME).snapshots.add(snap_params('live_snap2'))
    for idx, _ in enumerate(api.vms.get(VM1_NAME).snapshots.list()):
        testlib.assert_true_within(
            func=(lambda: (api.vms.get(VM1_NAME).snapshots.list()[idx].
                           snapshot_status == 'ok')),
            timeout=SHORT_TIMEOUT,
        )
    api.vms.get(VM1_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM1_NAME).snapshots.list()) == 2,
    )
    for idx, _ in enumerate(api.vms.get(VM1_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda: (api.vms.get(VM1_NAME).snapshots.list()[idx].
                     snapshot_status == 'ok'),
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM1_NAME).status.state == 'up',
    )
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok',
    )
def attach_detach_disk(vm, disk, new_disk):
    """Attach new_disk (a Cinder volume) to the VM as an active disk, then
    detach the original NFS-backed disk without deleting its image."""
    print("[{}] Attaching the '{}' Cinder volume to the VM...".format(
        vm.name, disk.name))
    attach_params = params.Disk(id=new_disk.id, active=True)
    vm.disks.add(attach_params)
    print("[{}] Detaching the '{}' NFS volume from the VM...".format(
        vm.name, disk.name))
    # detach=True removes only the VM mapping, keeping the disk image.
    disk.delete(action=params.Action(detach=True))
def hotplug_disk(api):
    """Hot-plug a new thin cow disk on the 'iscsi' domain into VM0, wait
    until it is 'ok', and assert that it is active."""
    hotplug_params = params.Disk(
        name=DISK1_NAME,
        size=9 * GB,
        provisioned_size=2,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(storage_domain=[
            params.StorageDomain(name='iscsi', ),
        ], ),
        status=None,
        sparse=True,
        bootable=False,
        active=True,
    )
    api.vms.get(VM0_NAME).disks.add(hotplug_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).disks.get(
            DISK1_NAME).status.state == 'ok'
    )
    nt.assert_true(api.vms.get(VM0_NAME).disks.get(DISK1_NAME).active)
def _register_disks(self, active_sds):
    """Register every unregistered (floating) disk found on each active
    storage domain, updating the progress slider/label as it goes.

    Does nothing when the disks slider is hidden (disk registration was
    not requested). Per-disk failures are reported and registration
    continues with the remaining disks.
    """
    # Guard clause instead of `hidden == False` (comparison anti-idiom).
    if self.disksSlider.hidden:
        return
    for storageDomain in active_sds:
        unreg_disks = storageDomain.disks.list(unregistered=True)
        # Reset progress widgets for this storage domain.
        self.disksSlider.value = 0
        self.disk_import.value = "Storage register disks from storage %s" % storageDomain.name
        self.disksSlider.update()
        self.disk_import.update()
        # enumerate replaces the manual `ind` counter.
        for ind, disk_per_domain in enumerate(unreg_disks, start=1):
            self._update_slider_text(self.disksSlider, self.disk_import,
                                     "disk", disk_per_domain,
                                     len(unreg_disks), ind,
                                     "Storage: " + storageDomain.name)
            try:
                storageDomain.disks.add(
                    disk=params.Disk(id=disk_per_domain.id),
                    unregistered=True)
            except Exception as e:
                # Report but keep registering the remaining disks.
                self._handle_exception(self.disk_import, disk_per_domain, e)
    self._finish_registration(self.disksSlider, self.disk_import, "disk")
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface):
    """Adds second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    if len(api.vms.get(TEMP_VM_NAME).disks.list()) > 1:
        print("RHEVM: Warning: found more than one disk in existing VM.")
        print("RHEVM: Skipping this step, attempting to continue...")
        return
    target_domain = api.storagedomains.get(sdomain)
    temp_vm = api.vms.get(TEMP_VM_NAME)
    temp_vm.disks.add(params.Disk(
        storage_domain=target_domain,
        size=disk_size,
        interface=disk_interface,
        format=disk_format))
    wait_for(check_disks, [api], fail_condition=False, delay=5, num_sec=900)
    # Verify that the VM now has the two expected disks.
    if len(api.vms.get(TEMP_VM_NAME).disks.list()) < 2:
        print("RHEVM: Disk failed to add")
        sys.exit(127)
def create_lun(api, vm_name, disk_alias, lun_id):
    """Create a direct-attach (FCP LUN) disk from lun_id.

    The disk is attached to vm_name when given, otherwise created as a
    floating disk. No API call is made in check mode.

    Returns:
        1 -- a change was (or would be) made.

    Raises:
        Exception: wrapping any SDK error, keeping the original message.
    """
    try:
        lu = params.LogicalUnit()
        lu.set_id(lun_id)
        storage_params = params.Storage()
        storage_params.set_id(lun_id)
        storage_params.set_logical_unit([lu])
        storage_params.set_type('fcp')
        disk_params = params.Disk()
        disk_params.set_format('raw')
        disk_params.set_interface('virtio')
        disk_params.set_alias(disk_alias)
        disk_params.set_active(True)
        disk_params.set_lun_storage(storage_params)
        # Single check-mode guard replaces the duplicated test; the unused
        # `vm` and `disk` locals from the original are dropped.
        if not MODULE_CHECK_MODE:
            if vm_name:
                # Attach the direct LUN straight to the VM.
                api.vms.get(name=vm_name).disks.add(disk_params)
            else:
                # No VM given: create it as a floating disk.
                api.disks.add(disk_params)
        return 1
    except Exception as e:
        raise Exception("Error while adding new lun: " + str(e))
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name, provider):
    """Adds second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            logger.info(
                "RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                provider, temp_vm_name)
            logger.info(
                "RHEVM:%r Skipping this step, attempting to continue...",
                provider)
            return
        target_domain = api.storagedomains.get(sdomain)
        temp_vm = api.vms.get(temp_vm_name)
        temp_vm.disks.add(params.Disk(
            storage_domain=target_domain,
            size=disk_size,
            interface=disk_interface,
            format=disk_format))
        wait_for(check_disks, [api, temp_vm_name], fail_condition=False,
                 delay=5, num_sec=900)
        # check, if there are two disks
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
def generic_import_from_glance(api, image_name=CIRROS_IMAGE_NAME, as_template=False, image_ext='_glance_disk', template_ext='_glance_template', dest_storage_domain=MASTER_SD_TYPE, dest_cluster=CLUSTER_NAME):
    """Import an image from the Glance storage domain as a disk (and
    optionally as a template) and wait for the disk to become ready."""
    glance_provider = api.storagedomains.get(SD_GLANCE_NAME)
    source_image = glance_provider.images.get(name=image_name)
    base_name = image_name.replace(" ", "_")
    disk_name = base_name + image_ext
    import_action = params.Action(
        storage_domain=params.StorageDomain(
            name=dest_storage_domain,
        ),
        cluster=params.Cluster(
            name=dest_cluster,
        ),
        import_as_template=as_template,
        disk=params.Disk(
            name=disk_name,
        ),
        template=params.Template(
            name=base_name + template_ext,
        ),
    )
    nt.assert_true(
        source_image.import_image(import_action)
    )
    testlib.assert_true_within_long(
        lambda: api.disks.get(disk_name).status.state == 'ok',
    )
def add_directlun(prefix):
    """Attach the first unused iSCSI LUN on the storage host to VM0 as a
    direct LUN (virtio-scsi, unfiltered SGIO) and verify it is attached."""
    # Collect the LUN GUIDs exported by the iSCSI storage host.
    ret = prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME).ssh(
        ['cat', '/root/multipath.txt'])
    nt.assert_equals(ret.code, 0)
    all_guids = ret.out.splitlines()
    # LUNs 0..SD_ISCSI_NR_LUNS-1 are used by the iSCSI storage domain, so
    # the first unused one is at index SD_ISCSI_NR_LUNS.
    lun_guid = all_guids[SD_ISCSI_NR_LUNS]
    dlun_params = params.Disk(
        name=DLUN_DISK_NAME,
        interface='virtio_scsi',
        format='raw',
        lun_storage=params.Storage(
            type_='iscsi',
            logical_unit=[
                params.LogicalUnit(
                    id=lun_guid,
                    address=prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME).ip(),
                    port=SD_ISCSI_PORT,
                    target=SD_ISCSI_TARGET,
                    username='******',
                    password='******',
                )
            ]),
        sgio='unfiltered',
    )
    api = prefix.virt_env.engine_vm().get_api()
    api.vms.get(VM0_NAME).disks.add(dlun_params)
    nt.assert_not_equal(
        api.vms.get(VM0_NAME).disks.get(DLUN_DISK_NAME),
        None,
        'Direct LUN disk not attached')
def snapshot_live_merge(api):
    """Live-merge test for VM0's first disk: take two live snapshots, delete
    the older one and verify the merge. Currently always skipped."""
    raise SkipTest(
        "[02/04/17] Test is failing for weeks without real knowladge on the reason, despite debugging from storage team"
    )
    # NOTE: everything below is unreachable while the SkipTest above stays.
    first_disk = api.vms.get(VM0_NAME).disks.list()[0]
    disk_id = first_disk.id
    disk_name = first_disk.name

    def snap_params(description):
        # Live snapshot of the disk, persisting the memory state.
        return params.Snapshot(
            description=description,
            persist_memorystate=True,
            disks=params.Disks(disk=[params.Disk(id=disk_id, ), ], ),
        )

    nt.assert_true(
        api.vms.get(VM0_NAME).snapshots.add(snap_params('live_snap1')))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).snapshots.list()[-1]
        .snapshot_status == 'ok')
    nt.assert_true(
        api.vms.get(VM0_NAME).snapshots.add(snap_params('live_snap2')))
    for idx, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_short(
            lambda: (api.vms.get(VM0_NAME).snapshots.list()[idx]
                     .snapshot_status == 'ok'))
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.list()[-2].delete())
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM0_NAME).snapshots.list()) == 2,
    )
    for idx, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda: (api.vms.get(VM0_NAME).snapshots.list()[idx]
                     .snapshot_status == 'ok'),
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up')
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).disks.get(
            disk_name).status.state == 'ok')
def clone_snapshot(api, config, vm_from_list):
    """Clone a snapshot into a new VM.

    :param api: ovirtsdk api
    :param config: Configuration
    :param vm_from_list: name of the VM whose snapshot is cloned
    """
    vm_clone_name = (
        vm_from_list + config.get_vm_middle() + config.get_vm_suffix()
    )
    vm = api.vms.get(vm_from_list)
    snapshots = vm.snapshots.list(
        description=config.get_snapshot_description())
    if not snapshots:
        # Bug fix: the original only set a dead local flag (has_errors) and
        # then crashed with IndexError on snapshots[0]; abort instead.
        logger.error("!!! No snapshot found !!!")
        return
    snapshot = snapshots[0]
    # Find the storage domain where the cloned disks should be created:
    sd = api.storagedomains.get(name=config.get_destination_domain())
    # Build a Disk entry per snapshot disk, explicitly requesting creation
    # in the destination storage domain:
    disk_list = []
    for current in snapshot.disks.list():
        disk_list.append(params.Disk(
            image_id=current.get_id(),
            storage_domains=params.StorageDomains(
                storage_domain=[
                    params.StorageDomain(
                        id=sd.get_id(),
                    ),
                ],
            ),
        ))
    snapshot_param = params.Snapshot(id=snapshot.id)
    snapshots_param = params.Snapshots(snapshot=[snapshot_param])
    logger.info("Clone into VM (%s) started ..." % vm_clone_name)
    if not config.get_dry_run():
        api.vms.add(params.VM(
            name=vm_clone_name,
            memory=vm.get_memory(),
            cluster=api.clusters.get(config.get_cluster_name()),
            snapshots=snapshots_param,
            disks=params.Disks(
                disk=disk_list,
            )
        ))
        VMTools.wait_for_vm_operation(api, config, "Cloning", vm_from_list)
    logger.info("Cloning finished")
def add(self, memory, disk_size, cluster_name, storage_name, nic_name='eth0', network_interface='virtio', network_name='ovirtmgmt', disk_interface='virtio', disk_format='raw', template_name='Blank'):
    """
    Create VM with one NIC and one Disk.

    @memory: VM's memory size such as 1024*1024*1024=1GB.
    @disk_size: VM's disk size such as 512*1024=512MB.
    @nic_name: VM's NICs name such as 'eth0'.
    @network_interface: VM's network interface such as 'virtio'.
    @network_name: network such as ovirtmgmt for ovirt, rhevm for rhel.
    @disk_format: VM's disk format such as 'raw' or 'cow'.
    @disk_interface: VM's disk interface such as 'virtio'.
    @cluster_name: cluster name.
    @storage_name: storage domain name.
    @template_name: VM's template name, default is 'Blank'.
    """
    # network name is ovirtmgmt for ovirt, rhevm for rhel.
    vm_params = param.VM(name=self.name, memory=memory,
                         cluster=self.api.clusters.get(cluster_name),
                         template=self.api.templates.get(template_name))
    storage = self.api.storagedomains.get(storage_name)
    storage_params = param.StorageDomains(storage_domain=[storage])
    nic_params = param.NIC(name=nic_name,
                           network=param.Network(name=network_name),
                           interface=network_interface)
    disk_params = param.Disk(storage_domains=storage_params,
                             size=disk_size,
                             type_='system',
                             status=None,
                             interface=disk_interface,
                             format=disk_format,
                             sparse=True,
                             bootable=True)
    try:
        logging.info('Creating a VM %s', self.name)
        self.api.vms.add(vm_params)
        logging.info('NIC is added to VM %s', self.name)
        self.instance.nics.add(nic_params)
        logging.info('Disk is added to VM %s', self.name)
        self.instance.disks.add(disk_params)
        logging.info('Waiting for VM to reach <Down> status ...')
        # NOTE(review): unbounded poll; a stuck VM hangs the caller forever.
        while self.state() != 'down':
            time.sleep(1)
    except Exception as e:
        # Fixed Python-2-only 'except Exception, e' syntax; 'as' works on
        # both 2.6+ and 3. Also use lazy %-args in the logging call.
        logging.error('Failed to create VM with disk and NIC\n%s', str(e))
def snapshot_live_merge(api):
    """Take two live snapshots of VM0's Glance disk, delete the older one,
    and verify the live merge completes while the VM stays up."""
    if api.vms.get(VM0_NAME).disks.get(GLANCE_DISK_NAME) is None:
        raise SkipTest('Glance is not available')
    disk_id = api.vms.get(VM0_NAME).disks.get(GLANCE_DISK_NAME).id

    def snap_params(description):
        # Live snapshot of the Glance disk, persisting the memory state.
        return params.Snapshot(
            description=description,
            persist_memorystate=True,
            disks=params.Disks(disk=[params.Disk(id=disk_id, ), ], ),
        )

    nt.assert_true(
        api.vms.get(VM0_NAME).snapshots.add(snap_params('live_snap1')))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).snapshots.list()[-1]
        .snapshot_status == 'ok')
    nt.assert_true(
        api.vms.get(VM0_NAME).snapshots.add(snap_params('live_snap2')))
    for idx, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_short(
            lambda: (api.vms.get(VM0_NAME).snapshots.list()[idx]
                     .snapshot_status == 'ok'))
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.list()[-2].delete())
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM0_NAME).snapshots.list()) == 2,
    )
    for idx, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda: (api.vms.get(VM0_NAME).snapshots.list()[idx]
                     .snapshot_status == 'ok'),
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up')
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).disks.get(
            GLANCE_DISK_NAME).status.state == 'ok')
def createDisk(api, vm_name, storage_domain, disk_size, disk_format='cow', thin_provision=True, bootable=False, shareable=False, disk_name=None): #By default this function creates a non-bootable, non-shareable, thin provisioned cow formatted disk with a default name api.vms.get(vm_name).disks.add(params.Disk(storage_domains=params.StorageDomains(storage_domain=[api.storagedomains.get(storage_domain)]),size=int(disk_size)*1024*1024*1024,status=None,interface='virtio',format=disk_format,sparse=thin_provision,bootable=bootable, shareable=shareable, alias=disk_name)) print "Waiting for disk %s to be created" % disk_name while api.vms.get(vm_name).disks.get(name=disk_name).status.state != 'ok': sleep(1) #activate disk if not api.vms.get(vm_name).disks.get(name=disk_name).active: print "Activating: %s" % disk_name api.vms.get(vm_name).disks.get(name=disk_name).activate()
def _create_disk(self):
    """Create the backup disk for the engine VM upgrade and wait for it.

    Stores the new image and volume UUIDs in the environment, then
    deactivates the disk. Raises RuntimeError if it never becomes ready.
    """
    engine_api = engineapi.get_engine_api(self)
    # Target: the hosted-engine storage domain.
    backup_sds = params.StorageDomains(
        storage_domain=[
            engine_api.storagedomains.get(
                id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
            )
        ]
    )
    description = '{p}{t}'.format(
        p=ohostedcons.Const.BACKUP_DISK_PREFIX,
        t=time.strftime("%Y%m%d%H%M%S", time.localtime()),
    )
    disk_param = params.Disk(
        name='virtio-disk0',
        description=description,
        comment=description,
        alias='virtio-disk0',
        storage_domains=backup_sds,
        size=int(
            self.environment[ohostedcons.Upgrade.BACKUP_SIZE_GB]
        )*1024*1024*1024,
        interface='virtio',
        format='raw',
        sparse=False,
        bootable=True,
    )
    disk_broker = engine_api.disks.add(disk_param)
    d_img_id = disk_broker.get_id()
    d_vol_id = disk_broker.get_image_id()
    self.logger.debug('vol: {v}'.format(v=d_vol_id))
    self.logger.debug('img: {v}'.format(v=d_img_id))
    created = self._wait_disk_ready(
        engine_api,
        d_img_id,
        False,
    )
    if not created:
        raise RuntimeError(_(
            'Failed creating the new engine VM disk'
        ))
    self.environment[
        ohostedcons.Upgrade.BACKUP_IMG_UUID
    ] = d_img_id
    self.environment[
        ohostedcons.Upgrade.BACKUP_VOL_UUID
    ] = d_vol_id
    # Leave the backup disk inactive so it is not plugged into the VM.
    engine_api.disks.get(
        id=self.environment[ohostedcons.Upgrade.BACKUP_IMG_UUID]
    ).set_active(False)
def add_disk(vm_id, size, name=None, format='raw', interface='virtio', bootable='no', show=None, headers='yes', ovirt=None): """ Add a disk to a VM and prints out the disk details when done :param str vm_id: The Id of the VM to add the disk to :param str name: The name of the disk to add, if specified the disk will only be added if a disk of that name does not already exist :param int size: The size of the disk to create in bytes :param str format: The format of the disk to create (Use 'raw', the default unless you know oVirt well enough to know what you are doing) :param str interface: The interface of the disk (Use 'virtio', the default unless you know oVirt well enough to know what you are doing) :param str bootable: If 'yes' the disk will be bootable :param ovirtsdk.api.API ovirt: An open oVirt API connection The 'show' and 'headers' parameters are the same as for the 'query' task :returns: The disk that was added :rtype: ovirtsdk.infrastructure.brokers.Disk """ if name is not None: existing_disk = ovirt.disks.get(alias=name) if existing_disk: abort("Disk with name: '{0}' already exists".format(name)) vm = ovirt.vms.get(id=vm_id) if vm is None: abort("VM with specified ID '{0}' not found".format(vm_id)) disk_params = oVirtParams.Disk( size=int(size), format=format, interface=interface, bootable=(bootable == 'yes'), ) if name is not None: disk_params.name = name disk = vm.disks.add(disk_params) disk.activate(oVirtParams.Action(async=False)) oVirtObjectType.all_types['disk'].print_table((disk, ), show=show, headers=headers) return disk
def create_nfs_disk(ovirt_api, new_storage_id, disk, vm):
    """Create a raw virtio copy of `disk` on the given NFS storage domain
    and block until the new image reaches the 'ok' state."""
    print("[{}] Creating an NFS image for '{}'...".format(vm.name, disk.name))
    target_domain = ovirt_api.storagedomains.get(id=new_storage_id)
    nfs_disk_params = params.Disk()
    nfs_disk_params.set_alias(disk.name)
    nfs_disk_params.set_size(disk.size)
    nfs_disk_params.set_interface('virtio')
    nfs_disk_params.set_format('raw')
    nfs_disk = target_domain.disks.add(nfs_disk_params)
    # Poll every 3 seconds until the image is fully created.
    while nfs_disk.status.state != 'ok':
        nfs_disk = target_domain.disks.get(id=nfs_disk.id)
        time.sleep(3)
    return nfs_disk
def add_disk(api):
    """Attach the Glance disk (if any) to VM0 as an active boot disk, create
    a fresh cow disk for VM1 and VM2 on their respective NFS domains, and
    wait for all of them to be ready."""
    glance_disk = api.disks.get(GLANCE_DISK_NAME)
    if glance_disk:
        nt.assert_true(
            api.vms.get(VM0_NAME).disks.add(
                params.Disk(
                    id=glance_disk.get_id(),
                    active=True,
                    bootable=True,
                )))
    base_params = params.Disk(
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    targets = ((VM1_NAME, DISK1_NAME, SD_NFS_NAME),
               (VM2_NAME, DISK2_NAME, SD_SECOND_NFS_NAME))
    for vm_name, disk_name, sd_name in targets:
        # Reuse the shared params object, overriding name and target domain.
        base_params.name = disk_name
        base_params.storage_domains = params.StorageDomains(storage_domain=[
            params.StorageDomain(name=sd_name, ),
        ])
        nt.assert_true(api.vms.get(vm_name).disks.add(base_params))
    if glance_disk:
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok')
    for vm_name, disk_name in ((VM1_NAME, DISK1_NAME),
                               (VM2_NAME, DISK2_NAME)):
        testlib.assert_true_within_short(
            lambda: api.vms.get(vm_name).disks.get(
                disk_name).status.state == 'ok')
def create_params(): storage_domain = api.storagedomains.get(storage_domain_name) if not storage_domain: print "Unable to find storage domain '%s'" % (storage_domain_name) return None storage_domain_params = params.StorageDomains( storage_domain=[storage_domain]) disk_params = params.Disk(storage_domains=storage_domain_params, size=size_gb * GB, status=None, interface='virtio', format='cow', sparse=True, bootable=False) return disk_params
def add_disk(api):
    """Create DISK0 for VM0 on the master storage domain and wait until it
    reaches the 'ok' state."""
    disk0_params = params.Disk(
        name=DISK0_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(storage_domain=[
            params.StorageDomain(name=MASTER_SD_NAME, ),
        ], ),
        status=None,
        sparse=True,
        bootable=True,
    )
    api.vms.get(VM0_NAME).disks.add(disk0_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).disks.get(
            DISK0_NAME).status.state == 'ok')
def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
    """Create a disk on diskdomain and attach it to the named VM, waiting
    (up to ~200s, 100 polls of 2s) for the disk to reach the 'ok' state.

    Returns True on success, False (after setFailed/setMsg) otherwise.
    """
    VM = self.get_VM(vmname)
    newdisk = params.Disk(
        name=diskname,
        size=1024 * 1024 * 1024 * int(disksize),
        wipe_after_delete=True,
        sparse=diskallocationtype,
        interface=diskinterface,
        format=diskformat,
        bootable=diskboot,
        storage_domains=params.StorageDomains(
            storage_domain=[self.get_domain(diskdomain)]
        )
    )
    try:
        VM.disks.add(newdisk)
        VM.update()
        setMsg("Successfully added disk " + diskname)
        setChanged()
    except Exception as e:
        setFailed()
        # Fixed message: a space was missing before 'disk'.
        setMsg("Error attaching " + diskname + " disk, please recheck and remove any leftover configuration.")
        setMsg(str(e))
        return False
    try:
        currentdisk = VM.disks.get(name=diskname)
        attempt = 1
        while currentdisk.status.state != 'ok':
            currentdisk = VM.disks.get(name=diskname)
            if attempt == 100:
                # Give up after 100 polls and report the last seen state.
                setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
                raise Exception()
            else:
                attempt += 1
                time.sleep(2)
        setMsg("The disk " + diskname + " is ready.")
    except Exception as e:
        setFailed()
        setMsg("Error getting the state of " + diskname + ".")
        setMsg(str(e))
        return False
    return True
def generic_import_from_glance(glance_provider, image_name=CIRROS_IMAGE_NAME, as_template=False, image_ext='_glance_disk', template_ext='_glance_template', dest_storage_domain=MASTER_SD_TYPE, dest_cluster=CLUSTER_NAME):
    """Import an image from the given Glance provider as a disk (and
    optionally as a template) into the destination domain and cluster."""
    source_image = glance_provider.images.get(name=image_name)
    base_name = image_name.replace(" ", "_")
    import_action = params.Action(
        storage_domain=params.StorageDomain(name=dest_storage_domain, ),
        cluster=params.Cluster(name=dest_cluster, ),
        import_as_template=as_template,
        disk=params.Disk(name=base_name + image_ext, ),
        template=params.Template(name=base_name + template_ext, ),
    )
    nt.assert_true(source_image.import_image(import_action))
def hotplug_disk(api):
    """Hot-plug a new thin cow disk on the 'nfs' domain into VM1 and wait
    (within SHORT_TIMEOUT) for it to reach the 'ok' state."""
    hotplug_params = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(storage_domain=[
            params.StorageDomain(name='nfs', ),
        ], ),
        status=None,
        sparse=True,
        bootable=False,
    )
    api.vms.get(VM1_NAME).disks.add(hotplug_params)
    testlib.assert_true_within(
        func=(lambda: api.vms.get(VM1_NAME, ).disks.get(
            DISK1_NAME).status.state == 'ok'),
        timeout=SHORT_TIMEOUT,
    )
def _attach(vm, disk, activate):
    """
    Attach specified disk to VM, activating it when requested.

    Parameters:
        vm - ovirtsdk.infrastructure.brokers.VM object to attach disk to
        disk - ovirtsdk.infrastructure.brokers.Disk object to attach
        activate - whether an already-attached disk should be activated
    Returns:
        0 - No change
        1 - Change
    """
    existing = vm.disks.get(id=disk.id)
    if existing:
        # Already attached; at most we may need to activate it.
        if activate:
            return _activate(existing)
        return 0
    # Not attached yet: attach it (skipping the API call in check mode).
    if not MODULE_CHECK_MODE:
        vm.disks.add(params.Disk(id=disk.id, active=activate))
    return 1
def __new_disk(self, size, name):
    """Add a bootable cow/virtio system disk of `size` MiB to the VM called
    `name`, created on the 'STORAGE_DOMAIN' storage domain."""
    vm = self.__entrypoint().vms.get(name=name)
    target_sd = params.StorageDomains(storage_domain=[
        self.__entrypoint().storagedomains.get(name='STORAGE_DOMAIN')
    ])
    new_disk = params.Disk(
        storage_domains=target_sd,
        size=1024**2 * size,
        type_='system',
        interface='virtio',
        format='cow',
        bootable=True)
    try:
        added = vm.disks.add(disk=new_disk)
        print('Disk %s added to %s' % (added.get_name(), vm.get_name()))
    except Exception as ex:
        print('Unexpected Error: %s' % ex)
def create_disk(api, vm_name, disk_alias, disk_size_gb, disk_alloc, disk_iface):
    """Create a new disk with specified name and size. Attach to vm_name"""
    try:
        vm = api.vms.get(name=vm_name)
        if vm.disks.list(alias=disk_alias):
            # A disk with this alias already exists -> nothing to do.
            return 0
        if not MODULE_CHECK_MODE:
            byte_size = int(disk_size_gb) * 1024 * 1024 * 1024
            new_disk = params.Disk()
            new_disk.set_wipe_after_delete(True)
            new_disk.set_active(True)
            new_disk.set_alias(disk_alias)
            new_disk.set_provisioned_size(byte_size)
            new_disk.set_interface(disk_iface)
            if disk_alloc == "raw":
                new_disk.set_sparse(False)
            new_disk.set_format(disk_alloc)
            vm.disks.add(new_disk)
            # TODO: Use VMDisk.get_creation_status to wait until disk creation completes
        return 1
    except Exception as e:
        raise Exception("Error while creating new disk: " + str(e))