def snapshot_merge(api):
    dead_snap1_params = params.Snapshot(
        description='dead_snap1',
        persist_memorystate=False,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=api.vms.get(VM0_NAME).disks.get(DISK0_NAME).id,
                ),
            ],
        ),
    )
    api.vms.get(VM0_NAME).snapshots.add(dead_snap1_params)
    testlib.assert_true_within_short(
        lambda:
        api.vms.get(VM0_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    dead_snap2_params = params.Snapshot(
        description='dead_snap2',
        persist_memorystate=False,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=api.vms.get(VM0_NAME).disks.get(DISK0_NAME).id,
                ),
            ],
        ),
    )
    api.vms.get(VM0_NAME).snapshots.add(dead_snap2_params)
    testlib.assert_true_within_short(
        lambda:
        api.vms.get(VM0_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    api.vms.get(VM0_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_short(
        lambda:
        (len(api.vms.get(VM0_NAME).snapshots.list()) == 2) and
        (api.vms.get(VM0_NAME).snapshots.list()[-1].snapshot_status == 'ok'),
    )
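# The snapshot tests here rely on the testlib.assert_true_within_short /
# assert_true_within_long polling helpers from ovirt-system-tests. A minimal
# sketch of what such a helper might look like, assuming a plain time-based
# poll loop (the real testlib implementation may differ):
import time

def assert_true_within(func, timeout, interval=3):
    """Poll func() until it returns a truthy value or `timeout` seconds pass."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if func():
            return
        time.sleep(interval)  # poll interval is an assumed default
    raise AssertionError('condition not met within %s seconds' % timeout)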
def snapshot_cold_merge(api):
    if api.vms.get(VM1_NAME) is None:
        raise SkipTest('Glance is not available')
    dead_snap1_params = params.Snapshot(
        description='dead_snap1',
        persist_memorystate=False,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=api.vms.get(VM1_NAME).disks.get(DISK1_NAME).id,
                ),
            ],
        ),
    )
    api.vms.get(VM1_NAME).snapshots.add(dead_snap1_params)
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    dead_snap2_params = params.Snapshot(
        description='dead_snap2',
        persist_memorystate=False,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=api.vms.get(VM1_NAME).disks.get(DISK1_NAME).id,
                ),
            ],
        ),
    )
    api.vms.get(VM1_NAME).snapshots.add(dead_snap2_params)
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    api.vms.get(VM1_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_long(
        lambda:
        (len(api.vms.get(VM1_NAME).snapshots.list()) == 2) and
        (api.vms.get(VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok'),
    )
def snapshot_live_merge(api):
    disk = api.vms.get(VM1_NAME).disks.list()[0]
    disk_id = disk.id
    disk_name = disk.name
    live_snap1_params = params.Snapshot(
        description='live_snap1',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    api.vms.get(VM1_NAME).snapshots.add(live_snap1_params)
    testlib.assert_true_within(
        func=(
            lambda:
            api.vms.get(VM1_NAME).snapshots.list()[-1].snapshot_status == 'ok'
        ),
        timeout=SHORT_TIMEOUT,
    )
    live_snap2_params = params.Snapshot(
        description='live_snap2',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    api.vms.get(VM1_NAME).snapshots.add(live_snap2_params)
    for i, _ in enumerate(api.vms.get(VM1_NAME).snapshots.list()):
        testlib.assert_true_within(
            func=(
                lambda:
                api.vms.get(VM1_NAME).snapshots.list()[i].snapshot_status == 'ok'
            ),
            timeout=SHORT_TIMEOUT,
        )
    api.vms.get(VM1_NAME).snapshots.list()[-2].delete()
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM1_NAME).snapshots.list()) == 2,
    )
    for i, _ in enumerate(api.vms.get(VM1_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda:
            api.vms.get(VM1_NAME).snapshots.list()[i].snapshot_status == 'ok',
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM1_NAME).status.state == 'up',
    )
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok',
    )
def snapshot_live_merge(api):
    raise SkipTest(
        "[02/04/17] Test has been failing for weeks without real knowledge "
        "of the reason, despite debugging from the storage team"
    )
    disk = api.vms.get(VM0_NAME).disks.list()[0]
    disk_id = disk.id
    disk_name = disk.name
    live_snap1_params = params.Snapshot(
        description='live_snap1',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.add(live_snap1_params))
    testlib.assert_true_within_short(
        lambda:
        api.vms.get(VM0_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    live_snap2_params = params.Snapshot(
        description='live_snap2',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.add(live_snap2_params))
    for i, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_short(
            lambda:
            api.vms.get(VM0_NAME).snapshots.list()[i].snapshot_status == 'ok'
        )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.list()[-2].delete())
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM0_NAME).snapshots.list()) == 2,
    )
    for i, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda:
            api.vms.get(VM0_NAME).snapshots.list()[i].snapshot_status == 'ok',
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up'
    )
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM0_NAME).disks.get(disk_name).status.state == 'ok'
    )
def create_vm_template(conn, vmname, image, zone):
    vmparams = params.VM(
        name=vmname,
        cluster=conn.clusters.get(name=zone),
        template=conn.templates.get(name=image),
        disks=params.Disks(clone=True)
    )
    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception('error adding template %s' % image)
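# A hypothetical invocation of create_vm_template; the engine URL, credentials,
# and the template/cluster names below are placeholders, not values from the
# original script:
from ovirtsdk.api import API

conn = API(url='https://engine.example.com/api',
           username='admin@internal', password='secret', insecure=True)
create_vm_template(conn, 'web01', 'rhel7-template', 'Default')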
def clone_snapshot(api, config, vm_from_list):
    """
    Clone a snapshot into a new VM
    :param api: ovirtsdk api
    :param config: Configuration
    :param vm_from_list: VM whose snapshot should be cloned
    """
    vm_clone_name = vm_from_list + config.get_vm_middle() + config.get_vm_suffix()
    vm = api.vms.get(vm_from_list)
    snapshots = vm.snapshots.list(description=config.get_snapshot_description())
    if not snapshots:
        logger.error("!!! No snapshot found !!!")
        has_errors = True
        # Bail out here: without a snapshot the lookup below would raise
        # an IndexError (the original fell through after logging).
        return
    snapshot = snapshots[0]

    # Find the storage domain where the disks should be created:
    sd = api.storagedomains.get(name=config.get_destination_domain())

    # Find the image identifiers of the disks of the snapshot, as
    # we need them in order to explicitly indicate that we want
    # them created in a different storage domain:
    disk_ids = []
    for current in snapshot.disks.list():
        disk_ids.append(current.get_id())

    # Prepare the list of disks for the operation to create the
    # snapshot, explicitly indicating for each of them the storage
    # domain where it should be created:
    disk_list = []
    for disk_id in disk_ids:
        disk = params.Disk(
            image_id=disk_id,
            storage_domains=params.StorageDomains(
                storage_domain=[
                    params.StorageDomain(
                        id=sd.get_id(),
                    ),
                ],
            ),
        )
        disk_list.append(disk)

    snapshot_param = params.Snapshot(id=snapshot.id)
    snapshots_param = params.Snapshots(snapshot=[snapshot_param])
    logger.info("Clone into VM (%s) started ..." % vm_clone_name)
    if not config.get_dry_run():
        api.vms.add(params.VM(
            name=vm_clone_name,
            memory=vm.get_memory(),
            cluster=api.clusters.get(config.get_cluster_name()),
            snapshots=snapshots_param,
            disks=params.Disks(
                disk=disk_list,
            )
        ))
        VMTools.wait_for_vm_operation(api, config, "Cloning", vm_from_list)
    logger.info("Cloning finished")
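# clone_snapshot (and main() below) depend on VMTools.wait_for_vm_operation,
# which is not shown here. A minimal sketch of such a wait helper, assuming it
# polls the cloned VM until it leaves the image_locked state (the real VMTools
# ships with the backup tool and may differ):
import time

def wait_for_vm_operation(api, config, comment, vm_name):
    vm_clone_name = vm_name + config.get_vm_middle() + config.get_vm_suffix()
    while api.vms.get(vm_clone_name).get_status().get_state() == 'image_locked':
        print(comment + " in progress ...")
        time.sleep(10)  # assumed poll interval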
def snapshot_live_merge(api):
    if api.vms.get(VM0_NAME).disks.get(GLANCE_DISK_NAME) is None:
        raise SkipTest('Glance is not available')
    disk_id = api.vms.get(VM0_NAME).disks.get(GLANCE_DISK_NAME).id
    live_snap1_params = params.Snapshot(
        description='live_snap1',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.add(live_snap1_params))
    testlib.assert_true_within_short(
        lambda:
        api.vms.get(VM0_NAME).snapshots.list()[-1].snapshot_status == 'ok'
    )
    live_snap2_params = params.Snapshot(
        description='live_snap2',
        persist_memorystate=True,
        disks=params.Disks(
            disk=[
                params.Disk(
                    id=disk_id,
                ),
            ],
        ),
    )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.add(live_snap2_params))
    for i, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_short(
            lambda:
            api.vms.get(VM0_NAME).snapshots.list()[i].snapshot_status == 'ok'
        )
    nt.assert_true(api.vms.get(VM0_NAME).snapshots.list()[-2].delete())
    testlib.assert_true_within_long(
        lambda: len(api.vms.get(VM0_NAME).snapshots.list()) == 2,
    )
    for i, _ in enumerate(api.vms.get(VM0_NAME).snapshots.list()):
        testlib.assert_true_within_long(
            lambda:
            api.vms.get(VM0_NAME).snapshots.list()[i].snapshot_status == 'ok',
        )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up'
    )
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM0_NAME).disks.get(GLANCE_DISK_NAME).status.state == 'ok'
    )
def createVMimage(self, name, cluster, template):
    try:
        vmparams = params.VM(
            name=name,
            cluster=self.conn.clusters.get(name=cluster),
            template=self.conn.templates.get(name=template),
            disks=params.Disks(clone=True)
        )
        self.conn.vms.add(vmparams)
        setMsg("VM is created")
        setChanged()
        return True
    except Exception as e:
        setMsg("Failed to create VM")
        setMsg(str(e))
        setFailed()
        return False
def prepare_rhevm_template():
    tmp = {
        'template_disks': params.Disks(clone=appliance['clone_template']),
        'cluster_object': api.clusters.get(name=appliance['cluster']),
        'host_object': api.hosts.get(appliance['host']),
        'migrate': appliance['migrate'],
        'appliance_nics': appliance['NICS'][:],
        'appliance_memory': appliance['memory_size'],
        'appliance_type': appliance['vm_type'],
        'num_cores': appliance['cores'],
        'num_cpus': appliance['cpus'],
        'storage_name': appliance['disk_location'],
        'disks': appliance['disks']
    }
    tmp['cpu_topology'] = params.CpuTopology(cores=tmp['num_cores'], threads=tmp['num_cpus'])
    tmp['cpu_object'] = params.CPU(topology=tmp['cpu_topology'])
    tmp['domain_object'] = api.storagedomains.get(name=tmp['storage_name'])
    tmp['actions'] = params.Action(storage_domain=tmp['domain_object'])
    tmp['placement_object'] = params.VmPlacementPolicy(host=tmp['host_object'], affinity=tmp['migrate'])
    return tmp
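# A hypothetical consumer of prepare_rhevm_template(); the VM and template
# names are placeholders, and the memory units depend on what
# appliance['memory_size'] holds in the surrounding script (params.VM
# expects bytes):
tmp = prepare_rhevm_template()
api.vms.add(params.VM(
    name='cfme-appliance',                             # placeholder name
    cluster=tmp['cluster_object'],
    template=api.templates.get(name='cfme-template'),  # placeholder template
    memory=tmp['appliance_memory'],
    cpu=tmp['cpu_object'],
    type_=tmp['appliance_type'],
    placement_policy=tmp['placement_object'],
    disks=tmp['template_disks'],
))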
def main(argv):
    usage = "backup.py -c <config.cfg>"
    try:
        opts, args = getopt(argv, "hc:d")
        debug = False
        if not opts:
            print usage
            sys.exit(1)
        for opt, arg in opts:
            if (opt == "-h") or (opt == "--help"):
                print usage
                sys.exit(0)
            elif opt in ("-c"):
                config_file = arg
            elif opt in ("-d"):
                debug = True
    except GetoptError:
        print usage
        sys.exit(1)

    global config
    config = Config(config_file, debug)
    time_start = int(time.time())

    has_errors = False

    # Connect to server
    connect()

    # Test if all VM names are valid
    for vm_from_list in config.get_vm_names():
        if not api.vms.get(vm_from_list):
            print "!!! There is no VM with the following name in your cluster: " + vm_from_list
            api.disconnect()
            sys.exit(1)

    vms_with_failures = list(config.get_vm_names())

    for vm_from_list in config.get_vm_names():
        config.clear_vm_suffix()
        vm_clone_name = vm_from_list + config.get_vm_middle() + config.get_vm_suffix()

        # Check VM name length limitation
        length = len(vm_clone_name)
        if length > config.get_vm_name_max_length():
            print "!!! VM name with middle and suffix is too long (size: " + str(length) + ", allowed " + str(config.get_vm_name_max_length()) + ") !!!"
            Logger.log("VM name: " + vm_clone_name)
            api.disconnect()
            sys.exit(1)

        Logger.log("Start backup for: " + vm_from_list)
        try:
            # Get the VM
            vm = api.vms.get(vm_from_list)

            # Cleanup: Delete the cloned VM
            VMTools.delete_vm(api, config, vm_from_list)

            # Delete old backup snapshots
            VMTools.delete_snapshots(vm, config, vm_from_list)

            # Determine disks to snapshot
            vm_disks = []
            try:
                config_disks = config.get_vm_disks()[vm_from_list]
            except KeyError:
                config_disks = None
            for vm_disk in vm.disks.list():
                if config_disks is None or vm_disk.get_name() in config_disks:
                    vm_disks.append(vm_disk)

            # Create a VM snapshot:
            try:
                Logger.log("Snapshot creation started ...")
                if not config.get_dry_run():
                    vm.snapshots.add(
                        params.Snapshot(
                            description=config.get_snapshot_description(),
                            vm=vm,
                            disks=params.Disks(disk=vm_disks)
                        )
                    )
                    VMTools.wait_for_snapshot_operation(vm, config, "creation")
                Logger.log("Snapshot created")
            except Exception as e:
                Logger.log("Can't create snapshot for VM: " + vm_from_list)
                Logger.log("DEBUG: " + str(e))
                has_errors = True
                continue

            # Clone the snapshot into a VM
            snapshots = vm.snapshots.list(description=config.get_snapshot_description())
            if not snapshots:
                Logger.log("!!! No snapshot found")
                has_errors = True
                continue
            snapshot = snapshots[0]
            snapshot_param = params.Snapshot(id=snapshot.id)
            snapshots_param = params.Snapshots(
                snapshot=[snapshot_param],
                collapse_snapshots=True
            )
            if config.get_vm_clone_domain() is not None:
                clone_sd = api.storagedomains.get(name=config.get_vm_clone_domain())
                if not clone_sd:
                    Logger.log("!!! Unknown storage domain value for vm_clone_domain")
                    has_errors = True
                    continue
                vm_clone_disks = []
                for disk in snapshot.disks.list():
                    vm_clone_disks.append(
                        params.Disk(
                            image_id=disk.get_id(),
                            storage_domains=params.StorageDomains(storage_domain=[clone_sd])
                        )
                    )
            else:
                vm_clone_disks = snapshot.disks.list()

            Logger.log("Clone into VM started ...")
            if not config.get_dry_run():
                api.vms.add(
                    params.VM(
                        name=vm_clone_name,
                        memory=vm.get_memory(),
                        cluster=api.clusters.get(config.get_cluster_name()),
                        snapshots=snapshots_param,
                        disks=params.Disks(disk=vm_clone_disks)
                    )
                )
                VMTools.wait_for_vm_operation(api, config, "Cloning", vm_from_list)
            Logger.log("Cloning finished")

            # Delete backup snapshots
            VMTools.delete_snapshots(vm, config, vm_from_list)

            # Delete old backups
            VMTools.delete_old_backups(api, config, vm_from_list)

            # Export the VM
            try:
                vm_clone = api.vms.get(vm_clone_name)
                Logger.log("Export started ...")
                if not config.get_dry_run():
                    vm_clone.export(
                        params.Action(
                            storage_domain=api.storagedomains.get(config.get_export_domain())
                        )
                    )
                    VMTools.wait_for_vm_operation(api, config, "Exporting", vm_from_list)
                Logger.log("Exporting finished")
            except Exception as e:
                Logger.log("Can't export cloned VM (" + vm_clone_name + ") to domain: " + config.get_export_domain())
                Logger.log("DEBUG: " + str(e))
                has_errors = True
                continue

            # Delete the VM
            VMTools.delete_vm(api, config, vm_from_list)

            time_end = int(time.time())
            time_diff = (time_end - time_start)
            time_minutes = int(time_diff / 60)
            time_seconds = time_diff % 60

            Logger.log("Duration: " + str(time_minutes) + ":" + str(time_seconds) + " minutes")
            Logger.log("VM exported as " + vm_clone_name)
            Logger.log("Backup done for: " + vm_from_list)
            vms_with_failures.remove(vm_from_list)
        except errors.ConnectionError as e:
            Logger.log("!!! Can't connect to the server: " + str(e))
            connect()
            continue
        except errors.RequestError as e:
            Logger.log("!!! Got a RequestError: " + str(e))
            has_errors = True
            continue
        except Exception as e:
            Logger.log("!!! Got unexpected exception: " + str(e))
            api.disconnect()
            sys.exit(1)

    Logger.log("All backups done")

    if vms_with_failures:
        Logger.log("Backup failed for:")
        for i in vms_with_failures:
            Logger.log("  " + i)

    if has_errors:
        Logger.log("Some errors occurred during the backup, please check the log file")
        api.disconnect()
        sys.exit(1)

    # Disconnect from the server
    api.disconnect()
logDebug( "Using template %s" %( templatename ) ) # check if vmname already exist EXIT_ON = "CHECKVMNAME" checkVMName(VMNAME) #now try to create a new vm try: logDebug( "Creating VM %s..." %( VMNAME ) ) sdesc = "Created by addNewVM.py" # 70% memory guaranteed mguaranteed = int(MEMORY*GB*0.70) api.vms.add(params.VM(name=VMNAME, memory=MEMORY*GB, cluster=api.clusters.get(CLUSTER), template=api.templates.get(templatename), description=sdesc, memory_policy=params.MemoryPolicy(guaranteed=mguaranteed), disks=params.Disks(clone=False) )) logDebug( "VM %s created, waiting to disk allocation (preallocated disk)" %( VMNAME ) ) #now wait until is down vm = api.vms.get(name=VMNAME) while ( vm.get_status().state != 'down' ): logDebug( "VM %s is on state %s, sleeping %s seconds" %( vm.get_name(), vm.get_status().state, str( SLEEPTIME ) ) ) sleep(SLEEPTIME) vm = api.vms.get(name=VMNAME) except Exception, err: logDebug( "Error on creating a new vm %s" %( VMNAME ), 2 ) logDebug( Exception, 2) logDebug( err, 2) #rename disk alias
def makeTemplate(self, name, comments, machineId, clusterId, storageId, displayType):
    '''
    Publish the machine (makes a template from it so we can create COWs)
    and returns the id of the template being created.

    Args:
        name: Name of the machine (careful, only ascii characters and no spaces!!!)
        machineId: id of the machine to be published
        clusterId: id of the cluster that will hold the machine
        storageId: id of the storage that will contain the publication AND linked clones
        displayType: type of display (for oVirt admin interface only)

    Returns:
        The id of the template being created.

    Raises an exception if the operation could not be accomplished.
    '''
    logger.debug("n: {0}, c: {1}, vm: {2}, cl: {3}, st: {4}, dt: {5}".format(
        name, comments, machineId, clusterId, storageId, displayType))

    try:
        lock.acquire(True)

        api = self.__getApi()

        cluster = api.clusters.get(id=clusterId)
        vm = api.vms.get(id=machineId)

        if vm is None:
            raise Exception('Machine not found')

        if cluster is None:
            raise Exception('Cluster not found')

        if vm.get_status().get_state() != 'down':
            raise Exception('Machine must be in down state to publish it')

        print(vm.disks.list())

        # Create disks description to be created in specified storage domain, one for each disk
        sd = params.StorageDomains(storage_domain=[params.StorageDomain(id=storageId)])

        fix = not self._isFullyFunctionalVersion(api)[0]  # If we need a fix for "publish"

        dsks = []
        for dsk in vm.disks.list():
            dsks.append(params.Disk(id=dsk.get_id(), storage_domains=sd, alias=dsk.get_alias()))
            # dsks.append(dsk)

        disks = params.Disks(disk=dsks)

        # Create display description
        # display = params.Display(type_=displayType)

        # TODO: Restore proper template creation mechanism
        if fix is True:
            vm = params.VM(id=vm.get_id())
        else:
            vm = params.VM(id=vm.get_id(), disks=disks)

        template = params.Template(
            name=name,
            vm=vm,
            cluster=params.Cluster(id=cluster.get_id()),
            description=comments
        )
        # display=display)

        return api.templates.add(template).get_id()
    finally:
        lock.release()
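# A hypothetical call to makeTemplate; the ids below are placeholder UUIDs and
# `client` stands for an instance of the surrounding provider class (which
# supplies __getApi and the module-level lock):
template_id = client.makeTemplate(
    name='uds-publication-1',
    comments='published by UDS',
    machineId='11111111-2222-3333-4444-555555555555',
    clusterId='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
    storageId='ffffffff-0000-1111-2222-333333333333',
    displayType='spice',
)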
def create_vm(vmprefix, disksize, storagedomain, network, vmcores, vmsockets, addstorage):
    print("------------------------------------------------------")
    print("Creating", num, "RHEV based virtual machines")
    print("-------------------------------------------------------")
    for machine in range(0, int(num)):
        try:
            vm_name = str(vmprefix) + "_" + str(machine) + "_sockets_" + str(vmsockets)
            vm_memory = int(memory) * 1024 * 1024 * 1024
            vm_cluster = api.clusters.get(name=cluster)
            vm_template = api.templates.get(name=vmtemplate)
            vm_os = params.OperatingSystem(boot=[params.Boot(dev="hd")])
            cpu_params = params.CPU(topology=params.CpuTopology(sockets=vmsockets, cores=vmcores))

            # set proper VM parameters - based on whether the VM disk will be "thin" or "preallocated"
            if vmdiskpreallocated == "yes":
                vm_params = params.VM(name=vm_name, memory=vm_memory, cluster=vm_cluster,
                                      template=vm_template, os=vm_os, cpu=cpu_params,
                                      disks=params.Disks(clone=True))
            elif vmdiskpreallocated == "no":
                vm_params = params.VM(name=vm_name, memory=vm_memory, cluster=vm_cluster,
                                      template=vm_template, os=vm_os, cpu=cpu_params)

            print("creating virtual machine", vm_name)
            api.vms.add(vm=vm_params)
            api.vms.get(vm_name).nics.add(params.NIC(name=nicname,
                                                     network=params.Network(name=network),
                                                     interface='virtio'))

            # update vm and add disk to it
            wait_vm_state(vm_name, "down")
            print("Virtual machine created:", vm_name, "with parameters - memory:", memory, "[GB]",
                  "cores:", vmcores, "sockets:", vmsockets,
                  "- waiting for the machine to unlock so we can proceed with configuration")
            wait_vm_state(vm_name, "down")

            diskname = "disk_" + str(vmprefix) + str(machine)

            # if an additional disk has to be added to the VM - it can be preallocated or thin
            if addstorage == "yes" and diskpreallocated == "no":
                for disk in range(0, int(numdisks)):
                    # add one disk at a time - one will be added by default - only add thin disks
                    api.vms.get(vm_name).disks.add(params.Disk(
                        name=diskname + "_" + str(disk),
                        storage_domains=params.StorageDomains(
                            storage_domain=[api.storagedomains.get(name=storagedomain)]),
                        size=int(disksize) * 1024 * 1024 * 1024,
                        status=None,
                        interface='virtio',
                        format='cow',
                        sparse=True,
                        bootable=False))
                    print("Disk of size:", disksize, "GB originating from", storagedomain,
                          "storage domain is attached to the VM - but we cannot start the machine"
                          " before the disk is in OK state; starting the machine while the disk"
                          " is still Locked would result in a machine start failure")
                    wait_disk_state(diskname + "_" + str(disk), "ok")

                print("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print("Machine", vm_name, "started successfully, machine parameters are memory:",
                      memory, "[GB]", "cores:", vmcores, "sockets:", vmsockets,
                      "storage disk:", disksize, "[GB]")

            elif addstorage == "yes" and diskpreallocated == "yes":
                for disk in range(0, int(numdisks)):
                    api.vms.get(vm_name).disks.add(params.Disk(
                        name=diskname + "_" + str(disk),
                        storage_domains=params.StorageDomains(
                            storage_domain=[api.storagedomains.get(name=storagedomain)]),
                        size=int(disksize) * 1024 * 1024 * 1024,
                        status=None,
                        interface='virtio',
                        format='raw',
                        sparse=False,
                        bootable=False))
                    # if the disk is not in "OK" state, wait here - we cannot start the machine otherwise
                    print("Disk of size:", disksize, "GB originating from", storagedomain,
                          "storage domain is attached to the VM - but we cannot start the machine"
                          " before the disk is in OK state; starting the machine while the disk"
                          " is still Locked would result in a machine start failure")
                    wait_disk_state(diskname + "_" + str(disk), "ok")

                print("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print("Machine", vm_name, "started successfully, machine parameters are memory:",
                      memory, "[GB]", "cores:", vmcores, "sockets:", vmsockets,
                      "storage disk:", disksize, "[GB]")

            elif addstorage == "no":
                print("addstorage=no was specified for", vm_name,
                      "- no additional disk will be added, starting VM:", vm_name)
                api.vms.get(vm_name).start()
                print("Machine", vm_name, "started successfully, machine parameters are memory:",
                      memory, "[GB]", "cores:", vmcores, "sockets:", vmsockets,
                      "storage disk:", disksize, "[GB]")

        except Exception as e:
            print("Adding virtual machine '%s' failed: %s" % (vm_name, e))
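# create_vm above leans on wait_vm_state and wait_disk_state helpers that are
# not shown. Minimal sketches, assuming they simply poll the API until the
# object reaches the wanted state (the real helpers may add timeouts):
import time

def wait_vm_state(vm_name, state, interval=5):
    # poll the VM status until it matches the requested state
    while api.vms.get(vm_name).status.state != state:
        time.sleep(interval)

def wait_disk_state(disk_name, state, interval=5):
    # poll the disk status until it matches the requested state
    while api.disks.get(name=disk_name).status.state != state:
        time.sleep(interval)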