def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): if vmdisk_alloc == 'thin': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') elif vmdisk_alloc == 'preallocated': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') try: conn.vms.add(vmparams) except: print "Error creating VM with specified parameters" sys.exit(1) vm = conn.vms.get(name=vmname) try: vm.disks.add(vmdisk) except: print "Error attaching disk" try: vm.nics.add(nic_net1) except: print "Error adding nic"
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name, provider):
    """Attach a second disk to a temporary VM and wait for it to settle.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        # A VM that already carries more than one disk was prepared by an
        # earlier run; leave it alone.
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            logger.info(
                "RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                provider, temp_vm_name)
            logger.info(
                "RHEVM:%r Skipping this step, attempting to continue...",
                provider)
            return
        target_domain = api.storagedomains.get(sdomain)
        vm_handle = api.vms.get(temp_vm_name)
        domain_ref = params.StorageDomains(
            storage_domain=[params.StorageDomain(id=target_domain.get_id())])
        new_disk = params.Disk(
            storage_domains=domain_ref,
            size=disk_size,
            interface=disk_interface,
            format=disk_format)
        vm_handle.disks.add(new_disk)
        wait_for(check_disks, [api, temp_vm_name], fail_condition=False,
                 delay=5, num_sec=900)
        # Verify the VM now carries both disks.
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
def add_disk(api):
    """Attach the Glance-imported disk to VM0 (when it exists) and create a
    fresh thin NFS disk on VM1, then wait for both to reach 'ok'."""
    imported = api.disks.get(GLANCE_DISK_NAME)
    if imported:
        attach_spec = params.Disk(
            id=imported.get_id(),
            active=True,
            bootable=True,
        )
        nt.assert_true(api.vms.get(VM0_NAME).disks.add(attach_spec))
    fresh_disk = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='nfs', )],
        ),
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    nt.assert_true(api.vms.get(VM1_NAME).disks.add(fresh_disk))
    # Both disks must leave the 'locked' state before the test proceeds.
    if imported:
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok')
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM1_NAME).disks.get(
            DISK1_NAME).status.state == 'ok')
def hotplug_disk(api):
    """Hot-plug a second, non-bootable iSCSI disk into VM0 and verify that
    it reaches the 'ok' state and is reported active."""
    plug_spec = dict(
        name=DISK1_NAME,
        size=9 * GB,
        provisioned_size=2,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='iscsi', )],
        ),
        status=None,
        sparse=True,
        bootable=False,
        active=True,
    )
    api.vms.get(VM0_NAME).disks.add(params.Disk(**plug_spec))

    def _disk_is_ok():
        return api.vms.get(VM0_NAME).disks.get(DISK1_NAME).status.state == 'ok'

    testlib.assert_true_within_short(_disk_is_ok)
    nt.assert_true(api.vms.get(VM0_NAME).disks.get(DISK1_NAME).active)
def clone_snapshot(api, config, vm_from_list):
    """ Clone snapshot into a new vm
    :param api: ovirtsdk api
    :param config: Configuration
    :param vm_from_list: name of the VM whose snapshot is cloned

    Bug fix vs. the original: when no snapshot matches the configured
    description the function now logs and returns. The original set a
    local has_errors flag (never read) and fell through to snapshots[0],
    crashing with an IndexError.
    """
    vm_clone_name = vm_from_list + config.get_vm_middle() + config.get_vm_suffix()
    vm = api.vms.get(vm_from_list)
    snapshots = vm.snapshots.list(description=config.get_snapshot_description())
    if not snapshots:
        logger.error("!!! No snapshot found !!!")
        return
    snapshot = snapshots[0]
    # Find the storage domain where the disks should be created:
    sd = api.storagedomains.get(name=config.get_destination_domain())
    # Find the image identifiers of the disks of the snapshot, as we need
    # them in order to explicitly indicate that we want them created in a
    # different storage domain:
    disk_ids = [current.get_id() for current in snapshot.disks.list()]
    # Prepare the list of disks for the operation, explicitly indicating
    # for each of them the storage domain where it should be created:
    disk_list = [
        params.Disk(
            image_id=disk_id,
            storage_domains=params.StorageDomains(
                storage_domain=[
                    params.StorageDomain(id=sd.get_id()),
                ],
            ),
        )
        for disk_id in disk_ids
    ]
    snapshot_param = params.Snapshot(id=snapshot.id)
    snapshots_param = params.Snapshots(snapshot=[snapshot_param])
    logger.info("Clone into VM (%s) started ..." % vm_clone_name)
    if not config.get_dry_run():
        api.vms.add(params.VM(
            name=vm_clone_name,
            memory=vm.get_memory(),
            cluster=api.clusters.get(config.get_cluster_name()),
            snapshots=snapshots_param,
            disks=params.Disks(disk=disk_list),
        ))
        VMTools.wait_for_vm_operation(api, config, "Cloning", vm_from_list)
    logger.info("Cloning finished")
def add(self, memory, disk_size, cluster_name, storage_name, nic_name='eth0', network_interface='virtio', network_name='ovirtmgmt', disk_interface='virtio', disk_format='raw', template_name='Blank'): """ Create VM with one NIC and one Disk. @memory: VM's memory size such as 1024*1024*1024=1GB. @disk_size: VM's disk size such as 512*1024=512MB. @nic_name: VM's NICs name such as 'eth0'. @network_interface: VM's network interface such as 'virtio'. @network_name: network such as ovirtmgmt for ovirt, rhevm for rhel. @disk_format: VM's disk format such as 'raw' or 'cow'. @disk_interface: VM's disk interface such as 'virtio'. @cluster_name: cluster name. @storage_name: storage domain name. @template_name: VM's template name, default is 'Blank'. """ # network name is ovirtmgmt for ovirt, rhevm for rhel. vm_params = param.VM(name=self.name, memory=memory, cluster=self.api.clusters.get(cluster_name), template=self.api.templates.get(template_name)) storage = self.api.storagedomains.get(storage_name) storage_params = param.StorageDomains(storage_domain=[storage]) nic_params = param.NIC(name=nic_name, network=param.Network(name=network_name), interface=network_interface) disk_params = param.Disk(storage_domains=storage_params, size=disk_size, type_='system', status=None, interface=disk_interface, format=disk_format, sparse=True, bootable=True) try: logging.info('Creating a VM %s' % self.name) self.api.vms.add(vm_params) logging.info('NIC is added to VM %s' % self.name) self.instance.nics.add(nic_params) logging.info('Disk is added to VM %s' % self.name) self.instance.disks.add(disk_params) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': time.sleep(1) except Exception, e: logging.error('Failed to create VM with disk and NIC\n%s' % str(e))
def createDisk(api, vm_name, storage_domain, disk_size, disk_format='cow',
               thin_provision=True, bootable=False, shareable=False,
               disk_name=None):
    # By default this function creates a non-bootable, non-shareable, thin
    # provisioned cow formatted disk with a default name.
    # disk_size is given in GB and converted to bytes for the API call.
    api.vms.get(vm_name).disks.add(params.Disk(
        storage_domains=params.StorageDomains(
            storage_domain=[api.storagedomains.get(storage_domain)]),
        size=int(disk_size)*1024*1024*1024,
        status=None,
        interface='virtio',
        format=disk_format,
        sparse=thin_provision,
        bootable=bootable,
        shareable=shareable,
        alias=disk_name))
    print "Waiting for disk %s to be created" % disk_name
    # NOTE(review): unbounded poll -- spins forever if the disk never
    # reaches 'ok'; callers should be aware there is no timeout.
    while api.vms.get(vm_name).disks.get(name=disk_name).status.state != 'ok':
        sleep(1)
    # activate disk (a freshly attached disk may come up deactivated)
    if not api.vms.get(vm_name).disks.get(name=disk_name).active:
        print "Activating: %s" % disk_name
        api.vms.get(vm_name).disks.get(name=disk_name).activate()
def _create_disk(self):
    """Create the engine-VM backup disk and record its UUIDs.

    Creates a preallocated raw virtio disk on the hosted-engine storage
    domain, waits for it to become ready, stores its image and volume
    UUIDs in the environment, and finally deactivates it.
    """
    engine_api = engineapi.get_engine_api(self)
    now = time.localtime()
    # Target storage domain: the hosted-engine SD recorded in the env.
    p_sds = params.StorageDomains(
        storage_domain=[
            engine_api.storagedomains.get(
                id=str(self.environment[ohostedcons.StorageEnv.SD_UUID])
            )
        ]
    )
    # Description encodes the backup prefix plus a creation timestamp.
    description = '{p}{t}'.format(
        p=ohostedcons.Const.BACKUP_DISK_PREFIX,
        t=time.strftime("%Y%m%d%H%M%S", now),
    )
    disk_param = params.Disk(
        name='virtio-disk0',
        description=description,
        comment=description,
        alias='virtio-disk0',
        storage_domains=p_sds,
        # Size is configured in GiB; the API expects bytes.
        size=int(
            self.environment[ohostedcons.Upgrade.BACKUP_SIZE_GB]
        )*1024*1024*1024,
        interface='virtio',
        format='raw',
        sparse=False,
        bootable=True,
    )
    disk_broker = engine_api.disks.add(disk_param)
    d_img_id = disk_broker.get_id()
    d_vol_id = disk_broker.get_image_id()
    self.logger.debug('vol: {v}'.format(v=d_vol_id))
    self.logger.debug('img: {v}'.format(v=d_img_id))
    # Block until the engine reports the new disk as ready.
    created = self._wait_disk_ready(
        engine_api,
        d_img_id,
        False,
    )
    if not created:
        raise RuntimeError(_(
            'Failed creating the new engine VM disk'
        ))
    # Persist both identifiers for the later stages of the upgrade flow.
    self.environment[
        ohostedcons.Upgrade.BACKUP_IMG_UUID
    ] = d_img_id
    self.environment[
        ohostedcons.Upgrade.BACKUP_VOL_UUID
    ] = d_vol_id
    # Deactivate the disk; it is attached/activated later in the flow.
    engine_api.disks.get(
        id=self.environment[ohostedcons.Upgrade.BACKUP_IMG_UUID]
    ).set_active(False)
def create_params():
    """Build the params.Disk for a thin 'cow' virtio data disk, or return
    None when the storage domain cannot be found."""
    # NOTE(review): api, storage_domain_name, size_gb and GB are read from
    # the enclosing scope -- confirm against the caller.
    storage_domain = api.storagedomains.get(storage_domain_name)
    if not storage_domain:
        print "Unable to find storage domain '%s'" % (storage_domain_name)
        return None
    storage_domain_params = params.StorageDomains(
        storage_domain=[storage_domain])
    # Thin-provisioned, non-bootable data disk.
    disk_params = params.Disk(storage_domains=storage_domain_params,
                              size=size_gb * GB,
                              status=None,
                              interface='virtio',
                              format='cow',
                              sparse=True,
                              bootable=False)
    return disk_params
def add_disk(api):
    """Create a thin-provisioned bootable disk for VM0 on the master
    storage domain and wait until the engine reports it 'ok'."""
    boot_disk = params.Disk(
        name=DISK0_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name=MASTER_SD_NAME, )],
        ),
        status=None,
        sparse=True,
        bootable=True,
    )
    api.vms.get(VM0_NAME).disks.add(boot_disk)

    def _ready():
        return api.vms.get(VM0_NAME).disks.get(DISK0_NAME).status.state == 'ok'

    testlib.assert_true_within_short(_ready)
def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
    """Create a disk, attach it to a VM and wait until it is usable.

    :param vmname: name of the VM to attach the disk to
    :param diskname: alias for the new disk
    :param disksize: size in GB (converted to bytes below)
    :param diskdomain: storage domain to create the disk on
    :param diskinterface: disk interface (e.g. 'virtio')
    :param diskformat: image format ('raw'/'cow')
    :param diskallocationtype: sparse flag for thin provisioning
    :param diskboot: whether the disk is bootable
    :return: True on success, False on failure; status is reported via
        setMsg/setFailed/setChanged.

    Bug fix vs. the original: the attach-failure message lacked a space
    between the disk name and the word 'disk'.
    """
    VM = self.get_VM(vmname)
    newdisk = params.Disk(
        name=diskname,
        size=1024 * 1024 * 1024 * int(disksize),  # GB -> bytes
        wipe_after_delete=True,
        sparse=diskallocationtype,
        interface=diskinterface,
        format=diskformat,
        bootable=diskboot,
        storage_domains=params.StorageDomains(
            storage_domain=[self.get_domain(diskdomain)]
        )
    )
    try:
        VM.disks.add(newdisk)
        VM.update()
        setMsg("Successfully added disk " + diskname)
        setChanged()
    except Exception as e:
        setFailed()
        setMsg("Error attaching " + diskname + " disk, please recheck and remove any leftover configuration.")
        setMsg(str(e))
        return False
    try:
        # Poll until the disk leaves the locked state; give up after 100
        # attempts (~200 s) so a stuck disk cannot hang the module forever.
        currentdisk = VM.disks.get(name=diskname)
        attempt = 1
        while currentdisk.status.state != 'ok':
            currentdisk = VM.disks.get(name=diskname)
            if attempt == 100:
                setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
                raise Exception()
            else:
                attempt += 1
                time.sleep(2)
        setMsg("The disk " + diskname + " is ready.")
    except Exception as e:
        setFailed()
        setMsg("Error getting the state of " + diskname + ".")
        setMsg(str(e))
        return False
    return True
def hotplug_disk(api):
    """Hot-plug a thin 10 GiB NFS disk into VM1 and wait (bounded by
    SHORT_TIMEOUT) until the engine reports it as 'ok'."""
    plug_spec = params.Disk(
        name=DISK1_NAME,
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        storage_domains=params.StorageDomains(
            storage_domain=[params.StorageDomain(name='nfs', )],
        ),
        status=None,
        sparse=True,
        bootable=False,
    )
    api.vms.get(VM1_NAME).disks.add(plug_spec)

    def _disk_ok():
        return api.vms.get(VM1_NAME, ).disks.get(DISK1_NAME).status.state == 'ok'

    testlib.assert_true_within(func=_disk_ok, timeout=SHORT_TIMEOUT)
def __new_disk(self, size, name):
    """Attach a bootable, thin-provisioned 'cow' system disk of *size* MiB
    (virtio interface) to the VM called *name*."""
    target_vm = self.__entrypoint().vms.get(name=name)
    domain = params.StorageDomains(storage_domain=[
        self.__entrypoint().storagedomains.get(name='STORAGE_DOMAIN')
    ])
    new_disk = params.Disk(storage_domains=domain,
                           size=1024**2 * size,  # MiB -> bytes
                           type_='system',
                           interface='virtio',
                           format='cow',
                           bootable=True)
    try:
        added = target_vm.disks.add(disk=new_disk)
        print('Disk %s added to %s' % (added.get_name(), target_vm.get_name()))
    except Exception as ex:
        print('Unexpected Error: %s' % ex)
def add_disk(api):
    """Attach the Glance-imported disk to VM0 (when present) and create one
    thin 'cow' disk apiece for VM1/VM2 on their NFS domains, waiting for
    every disk to reach the 'ok' state."""
    imported = api.disks.get(GLANCE_DISK_NAME)
    if imported:
        nt.assert_true(
            api.vms.get(VM0_NAME).disks.add(
                params.Disk(
                    id=imported.get_id(),
                    active=True,
                    bootable=True,
                )))
    # A single Disk object is reused for both VMs; only the name and the
    # target storage domain are rewritten per iteration.
    shared_spec = params.Disk(
        size=10 * GB,
        provisioned_size=1,
        interface='virtio',
        format='cow',
        status=None,
        sparse=True,
        active=True,
        bootable=True,
    )
    targets = ((VM1_NAME, DISK1_NAME, SD_NFS_NAME),
               (VM2_NAME, DISK2_NAME, SD_SECOND_NFS_NAME))
    for vm_name, disk_name, sd_name in targets:
        shared_spec.name = disk_name
        shared_spec.storage_domains = params.StorageDomains(storage_domain=[
            params.StorageDomain(name=sd_name, ),
        ])
        nt.assert_true(api.vms.get(vm_name).disks.add(shared_spec))
    if imported:
        testlib.assert_true_within_short(
            lambda: api.vms.get(VM0_NAME).disks.get(
                GLANCE_DISK_NAME).status.state == 'ok')
    for vm_name, disk_name in ((VM1_NAME, DISK1_NAME), (VM2_NAME, DISK2_NAME)):
        testlib.assert_true_within_short(
            lambda: api.vms.get(vm_name).disks.get(
                disk_name).status.state == 'ok')
def AddVmDisk(vm_name, disk_size, disk_type, disk_interface, disk_format, disk_bootable, disk_storage): vm = api.vms.get(vm_name) sd = params.StorageDomains( storage_domain=[api.storagedomains.get(name=disk_storage)]) if disk_format == 'raw': sparse = 'false' elif disk_format == 'cow': sparse = 'true' disk_params = params.Disk(storage_domains=sd, size=disk_size * GB, type_=disk_type, interface=disk_interface, format=disk_format, sparse=sparse, bootable=disk_bootable) try: d = vm.disks.add(disk_params) print "Disk '%s' added to '%s'." % (d.get_name(), vm.get_name()) except Exception as ex: print "Adding disk to '%s' failed: %s" % (vm.get_name(), ex)
def trigger_add_vm(**kwargs):
    """Create a CFME appliance VM from a template, attach the configured
    disks and NICs, enable network boot, migrate template disks to the
    target domain and start the VM."""
    vm_params = params.VM(name=kwargs['vm_name'],
                          template=kwargs['template_object'],
                          disks=kwargs['template_disks'],
                          cluster=kwargs['cluster_object'],
                          host=kwargs['host_object'],
                          cpu=kwargs['cpu_object'],
                          memory=kwargs['appliance_memory'] * GB,
                          placement_policy=kwargs['placement_object'],
                          type_=kwargs['appliance_type'])
    try:
        cfme_appliance = api.vms.add(vm_params)
    except RequestError as E:
        print("Error while creating vm(s)")
        sys.exit(E)
    # Wait until the template clone finishes before touching the VM.
    while cfme_appliance.status.state == 'image_locked':
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    # Attach each extra disk described in kwargs['disks'].
    for disk in kwargs['disks']:
        disk_size = kwargs['disks'][disk]['size'] * GB
        interface_type = kwargs['disks'][disk]['interface']
        disk_format = kwargs['disks'][disk]['format']
        allocation = kwargs['disks'][disk]['allocation']
        location = kwargs['disks'][disk]['location']
        store = api.storagedomains.get(name=location)
        domain = params.StorageDomains(storage_domain=[store])
        disk_param = params.Disk(description=disk,
                                 storage_domains=domain,
                                 size=disk_size,
                                 interface=interface_type,
                                 format=disk_format,
                                 type_=allocation)
        new_disk = cfme_appliance.disks.add(disk=disk_param)
    # Add only the NICs whose networks the appliance does not already have.
    if len(kwargs['appliance_nics']) > 0:
        current_nics = cfme_appliance.get_nics().list()
        current_networks = []
        for nic in current_nics:
            network_id = nic.get_network().id
            current_networks.append(api.networks.get(id=network_id).name)
        new_set = set(kwargs['appliance_nics'])
        current_set = set(current_networks)
        appliance_nics = list(new_set - current_set)
        for i in range(len(appliance_nics)):
            network_name = params.Network(name=appliance_nics[i])
            nic_name = params.NIC(name='nic{}'.format(i + 1), network=network_name)
            cfme_appliance.nics.add(nic=nic_name)
    # Wait for all disk operations to settle before reconfiguring boot.
    while locked_disks(cfme_appliance):
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    # PXE boot so the appliance can be provisioned over the network.
    dev = params.Boot(dev='network')
    cfme_appliance.os.boot.append(dev)
    # boot_params = {
    #     # "ks": "http://<satellite-server>/ks",
    #     # "ksdevice": boot_if,
    #     # "dns": "1.2.3.4,1.2.3.5",
    #     # "ip": "10.9.8.7",
    #     # "netmask": "255.255.255.0",
    #     # "gateway": "10.9.8.1",
    #     "hostname": "{0}.my.domain".format(VM_NAME)
    # }
    # cmdline = " ".join(map("{0[0]}={0[1]}".format, boot_params.iteritems()))
    # cfme_appliance.set_os(params.OperatingSystem(
    #     # kernel="iso://vmlinuz",
    #     # initrd="iso://initrd.img",
    #     cmdline=cmdline)
    # )
    cfme_appliance.update()
    # Move template disks onto the target domain, skipping appliance data
    # disks and disks that were already moved.
    # NOTE(review): 'appliance' is read from an enclosing scope while the
    # rest of the config comes from kwargs -- confirm this is intended.
    for disk in cfme_appliance.disks.list():
        if disk.description in appliance['disks'] \
                or already_moved(kwargs['domain_object'], disk):
            continue
        disk.move(action=kwargs['actions'])
    cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    while locked_disks(cfme_appliance):
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    cfme_appliance.start()
def trigger_add_vm(**kwargs):
    """Create a CFME appliance VM from a template, attach the configured
    disks and NICs ('cardN' naming), enable network boot, migrate template
    disks and start the VM."""
    vm_params = params.VM(name=kwargs['vm_name'],
                          template=kwargs['template_object'],
                          disks=kwargs['template_disks'],
                          cluster=kwargs['cluster_object'],
                          host=kwargs['host_object'],
                          cpu=kwargs['cpu_object'],
                          memory=kwargs['appliance_memory'] * GB,
                          placement_policy=kwargs['placement_object'],
                          type_=kwargs['appliance_type'])
    try:
        cfme_appliance = api.vms.add(vm_params)
    except RequestError as E:
        print("Error while creating vm(s)")
        sys.exit(E)
    # Wait until the template clone finishes before touching the VM.
    while cfme_appliance.status.state == 'image_locked':
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    # Attach each extra disk described in kwargs['disks'].
    for disk in kwargs['disks']:
        disk_size = kwargs['disks'][disk]['size'] * GB
        interface_type = kwargs['disks'][disk]['interface']
        disk_format = kwargs['disks'][disk]['format']
        allocation = kwargs['disks'][disk]['allocation']
        location = kwargs['disks'][disk]['location']
        store = api.storagedomains.get(name=location)
        domain = params.StorageDomains(storage_domain=[store])
        disk_param = params.Disk(description=disk,
                                 storage_domains=domain,
                                 size=disk_size,
                                 interface=interface_type,
                                 format=disk_format,
                                 type_=allocation)
        new_disk = cfme_appliance.disks.add(disk=disk_param)
    # Add only the NICs whose networks the appliance does not already have.
    if len(kwargs['appliance_nics']) > 0:
        current_nics = cfme_appliance.get_nics().list()
        current_networks = []
        for nic in current_nics:
            network_id = nic.get_network().id
            current_networks.append(api.networks.get(id=network_id).name)
        new_set = set(kwargs['appliance_nics'])
        current_set = set(current_networks)
        appliance_nics = list(new_set - current_set)
        for i in range(len(appliance_nics)):
            network_name = params.Network(name=appliance_nics[i])
            nic_name = params.NIC(name='card{}'.format(i), network=network_name)
            cfme_appliance.nics.add(nic=nic_name)
    # Wait for all disk operations to settle before reconfiguring boot.
    while locked_disks(cfme_appliance):
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    # PXE boot so the appliance can be provisioned over the network.
    dev = params.Boot(dev='network')
    cfme_appliance.os.boot.append(dev)
    cfme_appliance.update()
    # Move template disks onto the target domain, skipping appliance data
    # disks and disks that were already moved.
    # NOTE(review): 'appliance' is read from an enclosing scope while the
    # rest of the config comes from kwargs -- confirm this is intended.
    for disk in cfme_appliance.disks.list():
        if disk.description in appliance['disks'] \
                or already_moved(kwargs['domain_object'], disk):
            continue
        disk.move(action=kwargs['actions'])
    cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    while locked_disks(cfme_appliance):
        time.sleep(10)
        cfme_appliance = api.vms.get(name=kwargs['vm_name'])
    cfme_appliance.start()
def main(argv):
    """Back up every configured VM: snapshot, clone the snapshot into a
    temporary VM, export it to the export domain, then clean up."""
    usage = "backup.py -c <config.cfg>"
    try:
        opts, args = getopt(argv, "hc:d")
        debug = False
        if not opts:
            print usage
            sys.exit(1)
        for opt, arg in opts:
            if (opt == "-h") or (opt == "--help"):
                print usage
                sys.exit(0)
            # NOTE(review): ("-c") is a plain string, not a tuple, so this
            # is a substring test; it works only because opt is exactly
            # "-c" / "-d" here.
            elif opt in ("-c"):
                config_file = arg
            elif opt in ("-d"):
                debug = True
    except GetoptError:
        print usage
        sys.exit(1)
    global config
    config = Config(config_file, debug)
    time_start = int(time.time())
    has_errors = False
    # Connect to server
    connect()
    # Test if all VM names are valid
    for vm_from_list in config.get_vm_names():
        if not api.vms.get(vm_from_list):
            print "!!! There are no VM with the following name in your cluster: " + vm_from_list
            api.disconnect()
            sys.exit(1)
    # Start with every VM marked as failed; names are removed on success.
    vms_with_failures = list(config.get_vm_names())
    for vm_from_list in config.get_vm_names():
        config.clear_vm_suffix()
        vm_clone_name = vm_from_list + config.get_vm_middle(
        ) + config.get_vm_suffix()
        # Check VM name length limitation
        length = len(vm_clone_name)
        if length > config.get_vm_name_max_length():
            print "!!! VM name with middle and suffix are to long (size: " + str(
                length) + ", allowed " + str(
                    config.get_vm_name_max_length()) + ") !!!"
            Logger.log("VM name: " + vm_clone_name)
            api.disconnect()
            sys.exit(1)
        Logger.log("Start backup for: " + vm_from_list)
        try:
            # Get the VM
            vm = api.vms.get(vm_from_list)
            # Cleanup: Delete the cloned VM
            VMTools.delete_vm(api, config, vm_from_list)
            # Delete old backup snapshots
            VMTools.delete_snapshots(vm, config, vm_from_list)
            # Determine disks to snapshot (all of them unless the config
            # names a subset for this VM)
            vm_disks = []
            try:
                config_disks = config.get_vm_disks()[vm_from_list]
            except KeyError:
                config_disks = None
            for vm_disk in vm.disks.list():
                if config_disks is None or vm_disk.get_name() in config_disks:
                    vm_disks.append(vm_disk)
            # Create a VM snapshot:
            try:
                Logger.log("Snapshot creation started ...")
                if not config.get_dry_run():
                    vm.snapshots.add(
                        params.Snapshot(
                            description=config.get_snapshot_description(),
                            vm=vm,
                            disks=params.Disks(disk=vm_disks)))
                    VMTools.wait_for_snapshot_operation(vm, config, "creation")
                Logger.log("Snapshot created")
            except Exception as e:
                Logger.log("Can't create snapshot for VM: " + vm_from_list)
                Logger.log("DEBUG: " + str(e))
                has_errors = True
                continue
            # Clone the snapshot into a VM
            snapshots = vm.snapshots.list(
                description=config.get_snapshot_description())
            if not snapshots:
                Logger.log("!!! No snapshot found")
                has_errors = True
                continue
            snapshot = snapshots[0]
            snapshot_param = params.Snapshot(id=snapshot.id)
            snapshots_param = params.Snapshots(snapshot=[snapshot_param],
                                               collapse_snapshots=True)
            # Optionally redirect the clone's disks to a dedicated domain.
            if config.get_vm_clone_domain() is not None:
                clone_sd = api.storagedomains.get(
                    name=config.get_vm_clone_domain())
                if not clone_sd:
                    Logger.log(
                        "!!! Unknown storage domain value for vm_clone_domain")
                    has_errors = True
                    continue
                vm_clone_disks = []
                for disk in snapshot.disks.list():
                    vm_clone_disks.append(
                        params.Disk(image_id=disk.get_id(),
                                    storage_domains=params.StorageDomains(
                                        storage_domain=[clone_sd])))
            else:
                vm_clone_disks = snapshot.disks.list()
            Logger.log("Clone into VM started ...")
            if not config.get_dry_run():
                api.vms.add(
                    params.VM(name=vm_clone_name,
                              memory=vm.get_memory(),
                              cluster=api.clusters.get(
                                  config.get_cluster_name()),
                              snapshots=snapshots_param,
                              disks=params.Disks(disk=vm_clone_disks)))
                VMTools.wait_for_vm_operation(api, config, "Cloning",
                                              vm_from_list)
            Logger.log("Cloning finished")
            # Delete backup snapshots
            VMTools.delete_snapshots(vm, config, vm_from_list)
            # Delete old backups
            VMTools.delete_old_backups(api, config, vm_from_list)
            # Export the VM
            try:
                vm_clone = api.vms.get(vm_clone_name)
                Logger.log("Export started ...")
                if not config.get_dry_run():
                    vm_clone.export(
                        params.Action(storage_domain=api.storagedomains.get(
                            config.get_export_domain())))
                    VMTools.wait_for_vm_operation(api, config, "Exporting",
                                                  vm_from_list)
                Logger.log("Exporting finished")
            except Exception as e:
                Logger.log("Can't export cloned VM (" + vm_clone_name +
                           ") to domain: " + config.get_export_domain())
                Logger.log("DEBUG: " + str(e))
                has_errors = True
                continue
            # Delete the VM
            VMTools.delete_vm(api, config, vm_from_list)
            time_end = int(time.time())
            time_diff = (time_end - time_start)
            time_minutes = int(time_diff / 60)
            time_seconds = time_diff % 60
            Logger.log("Duration: " + str(time_minutes) + ":" +
                       str(time_seconds) + " minutes")
            Logger.log("VM exported as " + vm_clone_name)
            Logger.log("Backup done for: " + vm_from_list)
            vms_with_failures.remove(vm_from_list)
        except errors.ConnectionError as e:
            # Lost the API connection: reconnect and try the next VM.
            Logger.log("!!! Can't connect to the server" + str(e))
            connect()
            continue
        except errors.RequestError as e:
            Logger.log("!!! Got a RequestError: " + str(e))
            has_errors = True
            continue
        except Exception as e:
            Logger.log("!!! Got unexpected exception: " + str(e))
            api.disconnect()
            sys.exit(1)
    Logger.log("All backups done")
    if vms_with_failures:
        Logger.log("Backup failured for:")
        for i in vms_with_failures:
            Logger.log(" " + i)
    if has_errors:
        Logger.log(
            "Some errors occured during the backup, please check the log file")
        api.disconnect()
        sys.exit(1)
    # Disconnect from the server
    api.disconnect()
def create_vm(vmprefix, disksize, storagedomain, network, vmcores, vmsockets, addstorage):
    """Create `num` template-based VMs, optionally attach extra disks
    (thin or preallocated) and start each machine.

    NOTE(review): num, memory, cluster, vmtemplate, vmdiskpreallocated,
    nicname, diskpreallocated and numdisks are read from module/global
    scope -- confirm against the surrounding script.
    """
    print ("------------------------------------------------------")
    print ("Creating", num, "RHEV based virtual machines")
    print ("-------------------------------------------------------")
    for machine in range(0, int(num)):
        try:
            vm_name = str(vmprefix) + "_" + str(machine) + "_sockets_" + str(vmsockets)
            vm_memory = int(memory)*1024*1024*1024  # GB -> bytes
            vm_cluster = api.clusters.get(name=cluster)
            vm_template = api.templates.get(name=vmtemplate)
            vm_os = params.OperatingSystem(boot=[params.Boot(dev="hd")])
            cpu_params = params.CPU(topology=params.CpuTopology(sockets=vmsockets, cores=vmcores))
            # set proper VM parameters - based on will VM be on "thin" disk or "preallocated" disk
            if vmdiskpreallocated == "yes":
                vm_params = params.VM(name=vm_name, memory=vm_memory, cluster=vm_cluster,
                                      template=vm_template, os=vm_os, cpu=cpu_params,
                                      disks=params.Disks(clone=True))
            elif vmdiskpreallocated == "no":
                vm_params = params.VM(name=vm_name, memory=vm_memory, cluster=vm_cluster,
                                      template=vm_template, os=vm_os, cpu=cpu_params)
            print ("creating virtual machine", vm_name)
            api.vms.add(vm=vm_params)
            api.vms.get(vm_name).nics.add(params.NIC(name=nicname,
                                                     network=params.Network(name=network),
                                                     interface='virtio'))
            # update vm and add disk to it
            wait_vm_state(vm_name, "down")
            print ("Virtual machine created: ", vm_name, "and it has parameters", " memory:", memory, "[GB]",
                   " cores:", vmcores, " sockets", vmsockets,
                   " waiting on machine to unlock so we proceed with configuration")
            # NOTE(review): this second wait duplicates the one above.
            wait_vm_state(vm_name, "down")
            diskname = "disk_" + str(vmprefix) + str(machine)
            # if there is necessary to add additional disk to VM - can be preallocated or thin
            if addstorage == "yes" and diskpreallocated == "no":
                for disk in range(0, int(numdisks)):
                    # add one disk at time - one will be added by default - only add thin disks
                    api.vms.get(vm_name).disks.add(params.Disk(
                        name=diskname + "_" + str(disk),
                        storage_domains=params.StorageDomains(
                            storage_domain=[api.storagedomains.get(name=storagedomain)]),
                        size=int(disksize)*1024*1024*1024,
                        status=None,
                        interface='virtio',
                        format='cow',
                        sparse=True,
                        bootable=False))
                    print ("Disk of size:", disksize, "GB originating from", storagedomain,
                           "storage domain is attached to VM - but we cannot start machine before disk is in OK state"
                           " starting machine with disk attached to VM and same time having disk in Locked state will result in machine start failure")
                    wait_disk_state(diskname + "_" + str(disk), "ok")
                print ("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print ("Machine", vm_name, "started successfully, machine parameters are memory:", memory, "[GB]",
                       "cores:", vmcores, " sockets", vmsockets, " storage disk", disksize, "[GB]")
            elif addstorage == "yes" and diskpreallocated == "yes":
                for disk in range(0, int(numdisks)):
                    api.vms.get(vm_name).disks.add(params.Disk(
                        name=diskname + "_" + str(disk),
                        storage_domains=params.StorageDomains(
                            storage_domain=[api.storagedomains.get(name=storagedomain)]),
                        size=int(disksize)*1024*1024*1024,
                        status=None,
                        interface='virtio',
                        format='raw',
                        sparse=False,
                        bootable=False))
                    # if disk is not in "OK" state ... wait here - we cannot start machine if this is not the case
                    print ("Disk of size:", disksize, "GB originating from", storagedomain,
                           "storage domain is attached to VM - but we cannot start machine before disk is in OK state"
                           " starting machine with disk attached to VM and same time having disk in Locked state will result in machine start failure")
                    wait_disk_state(diskname + "_" + str(disk), "ok")
                print ("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print ("Machine", vm_name, "started successfully, machine parameters are memory:", memory, "[GB]"
                       " cores:", vmcores, " sockets", vmsockets, " storage disk", disksize, "[GB]")
            elif addstorage == "no":
                print ("addstorage=no was specified for", vm_name,
                       "no additional disk will be added, starting VM:", vm_name)
                api.vms.get(vm_name).start()
                print ("Machine", vm_name, "started successfully, machine parameters are memory:", memory, "[GB]"
                       "cores:", vmcores, "sockets:", vmsockets, "storage_disk", disksize, "[GB]")
        except Exception as e:
            # NOTE(review): print with %s placeholders but no formatting --
            # this prints the tuple verbatim.
            print ("Adding virtual machine '%s' failed: %s", vm_name, e)
def add(self, memory, disk_size, cluster_name, storage_name,
        nic_name='eth0', network_interface='virtio',
        network_name='ovirtmgmt', disk_interface='virtio',
        disk_format='raw', template_name='Blank', timeout=300):
    """
    Create VM with one NIC and one Disk.

    :param memory: VM's memory size such as 1024*1024*1024=1GB.
    :param disk_size: VM's disk size such as 512*1024=512MB.
    :param nic_name: VM's NICs name such as 'eth0'.
    :param network_interface: VM's network interface such as 'virtio'.
    :param network_name: network such as ovirtmgmt for ovirt, rhevm for rhel.
    :param disk_format: VM's disk format such as 'raw' or 'cow'.
    :param disk_interface: VM's disk interface such as 'virtio'.
    :param cluster_name: cluster name.
    :param storage_name: storage domain name.
    :param template_name: VM's template name, default is 'Blank'.
    :param timeout: Time out
    """
    end_time = time.time() + timeout
    # network name is ovirtmgmt for ovirt, rhevm for rhel.
    vm_params = param.VM(name=self.name, memory=memory,
                         cluster=self.api.clusters.get(cluster_name),
                         template=self.api.templates.get(template_name))
    storage = self.api.storagedomains.get(storage_name)
    storage_params = param.StorageDomains(storage_domain=[storage])
    nic_params = param.NIC(name=nic_name,
                           network=param.Network(name=network_name),
                           interface=network_interface)
    # Thin-provisioned bootable system disk on the chosen storage domain.
    disk_params = param.Disk(storage_domains=storage_params,
                             size=disk_size,
                             type_='system',
                             status=None,
                             interface=disk_interface,
                             format=disk_format,
                             sparse=True,
                             bootable=True)
    try:
        logging.info('Creating a VM %s' % self.name)
        self.api.vms.add(vm_params)
        logging.info('NIC is added to VM %s' % self.name)
        self.instance.nics.add(nic_params)
        logging.info('Disk is added to VM %s' % self.name)
        self.instance.disks.add(disk_params)
        logging.info('Waiting for VM to reach <Down> status')
        # Bounded wait (unlike the older variant of this method): poll
        # until the VM is down or the deadline passes.
        vm_down = False
        while time.time() < end_time:
            if self.is_dead():
                vm_down = True
                break
            time.sleep(1)
        if not vm_down:
            # NOTE(review): raised inside the try, so this timeout is
            # caught by the except below and only logged -- confirm that
            # swallowing it is intended.
            raise WaitVMStateTimeoutError("DOWN", self.state())
    except Exception as e:
        logging.error('Failed to create VM with disk and NIC\n%s' % str(e))
vmparams = params.VM( os=params.OperatingSystem(type_=options.osver), cpu=params.CPU(topology=params.CpuTopology(cores=int(options.vmcpu))), name=options.name, memory=1024 * 1024 * 1024 * int(options.vmmem), cluster=api.clusters.get(name=options.cluster), template=api.templates.get(name="Blank"), type_="server") vmdisk = params.Disk( size=1024 * 1024 * 1024 * int(options.sdsize), wipe_after_delete=True, sparse=True, interface="virtio", type_="System", format="cow", storage_domains=params.StorageDomains( storage_domain=[api.storagedomains.get(name="data_domain")])) vmnet = params.NIC() network_gest = params.Network(name=options.vmgest) network_serv = params.Network(name=options.vmserv) nic_gest = params.NIC(name='eth0', network=network_gest, interface='virtio') nic_serv = params.NIC(name='eth1', network=network_serv, interface='virtio') try: api.vms.add(vmparams) except:
from rhev_functions import * baseurl = "https://%s:%s" % (options.server, options.port) api = API(url=baseurl, username=options.username, password=options.password, insecure=True) try: value = api.hosts.list() except: print "Error accessing RHEV-M api, please check data and connection and retry" sys.exit(1) # Define VM based on parameters if __name__ == "__main__": vmparams = params.VM(os=params.OperatingSystem(type_=options.osver), cpu=params.CPU(topology=params.CpuTopology(cores=int(options.vmcpu))), name=options.name, memory=1024 * 1024 * 1024 * int(options.vmmem), cluster=api.clusters.get(name=options.cluster), template=api.templates.get(name="Blank"), type_="server") vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(options.sdsize), wipe_after_delete=True, sparse=True, interface="virtio", type_="System", format="cow", storage_domains=params.StorageDomains(storage_domain=[api.storagedomains.get(name="data_domain")])) vmnet = params.NIC() network_gest = params.Network(name=options.vmgest) network_serv = params.Network(name=options.vmserv) nic_gest = params.NIC(name='eth0', network=network_gest, interface='virtio') nic_serv = params.NIC(name='eth1', network=network_serv, interface='virtio') try: api.vms.add(vmparams) except: print "Error creating VM with specified parameters, recheck" sys.exit(1) if options.verbosity > 1:
def create(self, name, clu, numcpu, numinterfaces, netinterface, diskthin1, disksize1, diskinterface, memory, storagedomain, guestid, net1, net2=None, net3=None, net4=None, mac1=None, mac2=None, launched=True, iso=None, diskthin2=None, disksize2=None, vnc=False):
    """Create a VM in oVirt with one or two disks and up to four NICs.

    Args:
        name: VM name; also used to derive disk names "<name>_Disk1/2".
        clu: cluster name.
        numcpu: number of CPU cores.
        numinterfaces: how many NICs to create (eth0..eth3).
        netinterface: NIC interface type (e.g. 'virtio').
        diskthin1, diskthin2: thin-provision flags (cow+sparse vs raw).
        disksize1, disksize2: disk sizes in GB (disk2 created only if set).
        diskinterface: disk interface type.
        memory: RAM in MB.
        storagedomain: storage domain name holding both disks.
        guestid: OS type passed to params.OperatingSystem.
        net1..net4: network names for eth0..eth3.
        mac1, mac2: optional MAC overrides for eth0/eth1; a value without
            ':' is treated as the last byte and grafted onto the generated MAC.
        launched: unused in this method.
        iso: optional ISO name; attaches a cdrom and makes it second boot device.
        vnc: use a VNC display instead of spice.

    Returns:
        An error-message string on failure, None on success.  MAC addresses
        of the created NICs are appended to self.macaddr (for cobbler).
    """
    # Boot order: hard disk first, then network (or cdrom when an ISO is given).
    boot1, boot2 = 'hd', 'network'
    if iso in ["", "xx", "yy"]:
        iso = None
    if iso:
        boot2 = 'cdrom'
    api = self.api
    memory = memory * MB
    disksize1 = disksize1 * GB
    if disksize2:
        disksize2 = disksize2 * GB
    # VM CREATION IN OVIRT
    # TODO check that clu and storagedomain exist and that there is space there
    diskformat1, diskformat2 = 'raw', 'raw'
    sparse1, sparse2 = False, False
    if diskthin1:
        diskformat1 = 'cow'
        sparse1 = True
    if disksize2 and diskthin2:
        diskformat2 = 'cow'
        sparse2 = True
    # Refuse to clobber an existing VM of the same name.
    vm = api.vms.get(name=name)
    if vm:
        return "VM %s allready existing.Leaving...\n" % name
    clu = api.clusters.get(name=clu)
    storagedomain = api.storagedomains.get(name=storagedomain)
    # Create the standalone disks first; they are attached to the VM at the end.
    try:
        disk1 = params.Disk(storage_domains=params.StorageDomains(storage_domain=[storagedomain]), name="%s_Disk1" % (name), size=disksize1, type_='system', status=None, interface=diskinterface, format=diskformat1, sparse=sparse1, bootable=True)
        disk1 = api.disks.add(disk1)
        disk1id = disk1.get_id()
    # FIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed.  NOTE(review): any SDK error is reported as lack of space here,
    # which may be misleading -- confirm whether finer reporting is wanted.
    except Exception:
        return "Insufficient space in storage domain for disk1.Leaving...\n"
    if disksize2:
        try:
            disk2 = params.Disk(storage_domains=params.StorageDomains(storage_domain=[storagedomain]), name="%s_Disk2" % (name), size=disksize2, type_='system', status=None, interface=diskinterface, format=diskformat2, sparse=sparse2, bootable=False)
            disk2 = api.disks.add(disk2)
            disk2id = disk2.get_id()
        except Exception:
            return "Insufficient space in storage domain for disk2.Leaving...\n"
    # boot order
    boot = [params.Boot(dev=boot1), params.Boot(dev=boot2)]
    # vm creation
    kernel, initrd, cmdline = None, None, None
    if vnc:
        display = params.Display(type_='vnc')
    else:
        display = params.Display(type_='spice')
    api.vms.add(params.VM(name=name, memory=memory, cluster=clu, display=display, template=api.templates.get('Blank'), os=params.OperatingSystem(type_=guestid, boot=boot, kernel=kernel, initrd=initrd, cmdline=cmdline), cpu=params.CPU(topology=params.CpuTopology(cores=numcpu)), type_="server"))
    # add nics
    api.vms.get(name).nics.add(params.NIC(name='eth0', network=params.Network(name=net1), interface=netinterface))
    if numinterfaces >= 2:
        api.vms.get(name).nics.add(params.NIC(name='eth1', network=params.Network(name=net2), interface=netinterface))
        # compare eth0 and eth1 to get sure eth0 has a lower mac
        eth0ok = True
        maceth0 = api.vms.get(name).nics.get(name="eth0").mac.address
        maceth1 = api.vms.get(name).nics.get(name="eth1").mac.address
        eth0 = maceth0.split(":")
        eth1 = maceth1.split(":")
        for i in range(len(eth0)):
            el0 = int(eth0[i], 16)
            el1 = int(eth1[i], 16)
            if el0 == el1:
                continue
            # FIX: the first differing byte decides the ordering; without this
            # break a later, less-significant byte could wrongly flip the
            # verdict (e.g. 00:01:... vs 02:00:... was judged out of order).
            eth0ok = el0 < el1
            break
        if not eth0ok:
            # Swap the two MACs via a temporary address (a MAC must stay
            # unique while the swap is in flight).
            tempnic = "00:11:11:11:11:11"
            nic = api.vms.get(name).nics.get(name="eth0")
            nic.mac.address = tempnic
            nic.update()
            nic = api.vms.get(name).nics.get(name="eth1")
            nic.mac.address = maceth0
            nic.update()
            nic = api.vms.get(name).nics.get(name="eth0")
            nic.mac.address = maceth1
            nic.update()
    if mac1:
        nic = api.vms.get(name).nics.get(name="eth0")
        if not ":" in mac1:
            # bare byte: graft it onto the generated MAC prefix
            mac1 = "%s%s" % (nic.mac.address[:-2], mac1)
        nic.mac.address = mac1
        nic.update()
    if mac2:
        # NOTE(review): assumes eth1 exists, i.e. numinterfaces >= 2 whenever
        # mac2 is passed -- confirm against callers.
        nic = api.vms.get(name).nics.get(name="eth1")
        if not ":" in mac2:
            mac2 = "%s%s" % (nic.mac.address[:-2], mac2)
        nic.mac.address = mac2
        nic.update()
    if numinterfaces >= 3:
        api.vms.get(name).nics.add(params.NIC(name='eth2', network=params.Network(name=net3), interface=netinterface))
    if numinterfaces >= 4:
        api.vms.get(name).nics.add(params.NIC(name='eth3', network=params.Network(name=net4), interface=netinterface))
    api.vms.get(name).update()
    if iso:
        iso = checkiso(api, iso)
        cdrom = params.CdRom(file=iso)
        api.vms.get(name).cdroms.add(cdrom)
    # Wait for disk1 to be ready, attach it to the VM, then activate it.
    while api.disks.get(id=disk1id).get_status().get_state() != "ok":
        time.sleep(5)
    api.vms.get(name).disks.add(disk1)
    while not api.vms.get(name).disks.get(id=disk1id):
        time.sleep(2)
    api.vms.get(name).disks.get(id=disk1id).activate()
    if disksize2:
        while api.disks.get(id=disk2id).get_status().get_state() != "ok":
            time.sleep(5)
        api.vms.get(name).disks.add(disk2)
        while not api.vms.get(name).disks.get(id=disk2id):
            time.sleep(2)
        api.vms.get(name).disks.get(id=disk2id).activate()
    # retrieve MACS for cobbler
    vm = api.vms.get(name=name)
    for nic in vm.nics.list():
        self.macaddr.append(nic.mac.address)
def makeTemplate(self, name, comments, machineId, clusterId, storageId, displayType):
    '''
    Publishes the machine (makes a template from it so we can create COWs) and returns
    the id of the template being created.

    Args:
        name: Name of the template (care, only ascii characters and no spaces!!!)
        comments: Free-text description stored on the template
        machineId: id of the machine to be published
        clusterId: id of the cluster that will hold the machine
        storageId: id of the storage that will contain the publication AND linked clones
        displayType: type of display (for oVirt admin interface only); currently
            only logged here, not applied (see commented-out display code below)

    Returns:
        The id of the template being created.

    Raises:
        Exception if the VM or cluster cannot be found, or if the VM is not in
        the 'down' state (a machine must be stopped before it can be published).
    '''
    logger.debug(
        "n: {0}, c: {1}, vm: {2}, cl: {3}, st: {4}, dt: {5}".format(
            name, comments, machineId, clusterId, storageId, displayType))
    # FIX: `with lock:` replaces lock.acquire(True) inside try/finally -- the
    # old form could call release() on a lock it never acquired if acquire
    # itself raised.  The whole publish operation stays serialized.
    with lock:
        api = self.__getApi()

        cluster = api.clusters.get(id=clusterId)
        vm = api.vms.get(id=machineId)

        if vm is None:
            raise Exception('Machine not found')
        if cluster is None:
            raise Exception('Cluster not found')
        if vm.get_status().get_state() != 'down':
            raise Exception('Machine must be in down state to publish it')
        # FIX: removed stray debug statement `print(vm.disks.list())`.

        # Create disks description to be created in specified storage domain,
        # one for each disk currently attached to the VM.
        sd = params.StorageDomains(
            storage_domain=[params.StorageDomain(id=storageId)])
        # Older oVirt versions need a workaround ("fix") for publish: they
        # cannot take per-disk storage placement on the template VM.
        fix = not self._isFullyFunctionalVersion(api)[0]
        dsks = []
        for dsk in vm.disks.list():
            dsks.append(
                params.Disk(id=dsk.get_id(), storage_domains=sd, alias=dsk.get_alias()))
            # dsks.append(dsk)
        disks = params.Disks(disk=dsks)

        # Create display description
        # display = params.Display(type_=displayType)

        # TODO: Restore proper template creation mechanism
        if fix:
            vm = params.VM(id=vm.get_id())
        else:
            vm = params.VM(id=vm.get_id(), disks=disks)

        template = params.Template(
            name=name,
            vm=vm,
            cluster=params.Cluster(id=cluster.get_id()),
            description=comments)  # display=display)

        return api.templates.add(template).get_id()
# -- Script fragment (Python 2, oVirt SDK v3): add four shareable raw disks
# -- ("systemdg_001".."004") to an existing guest, waiting for each in turn.
# NOTE(review): all four dict entries reference the SAME `disk` spec object
# (aliasing, not copies), so they necessarily share one size -- confirm intended.
disks['systemdg_001'] = disk
disks['systemdg_002'] = disk
disks['systemdg_003'] = disk
disks['systemdg_004'] = disk
disk = None
apiurl = "https://" + ovirt_host + "/api"
# NOTE(review): credentials are masked placeholders here ('******').
api = API(url=apiurl, username='******', password='******', insecure=True)
for disk in disks:  # iterates the disk NAMES (dict keys)
    print "Creating disk '%s' with size '%sGB'" % (disk, disks[disk]['size'])
    # Preallocated (sparse=False) raw disk, shareable, aliased "<guest>_<name>";
    # size is given in GiB and converted to bytes.
    api.vms.get(guest_name).disks.add(
        params.Disk(storage_domains=params.StorageDomains(
            storage_domain=[api.storagedomains.get(datastore)]),
            size=int(disks[disk]['size']) * 1024 * 1024 * 1024,
            status=None, interface='virtio', format='raw', sparse=False,
            bootable=False, shareable=True, alias=guest_name + '_' + disk))
    print " - Waiting for disk to be fully allocated"
    # Poll until the freshly added disk reaches the 'ok' state.
    while api.vms.get(guest_name).disks.get(name=guest_name + '_' + disk).status.state != 'ok':
        sleep(1)
    print " - OK"
print "All disks created."
print "Activating disks"
# -- Restore-flow fragment (Python 2, oVirt SDK v3): create the restored VM,
# -- then create a scratch disk on the backup-access VM to restore data onto.

# Push the saved configuration into the new VM definition and create it.
newVm.initialization.configuration.set_data(configuration_data)
my_vm = api.vms.add(newVm)

DOMAIN_NAME = 'sdffds'  # target storage domain name (placeholder value)
MB = 1024 * 1024
INTERFACE = 'virtio'
# NOTE(review): FORMAT is unused in the visible code, and 'qcow' does not match
# the 'cow' format used by the Disk below -- confirm before relying on it.
FORMAT = 'qcow'

#Create a disk to restore the data to and attach it to that VM
storage_domain = api.storagedomains.get(DOMAIN_NAME)

#Find the VM that has access to the backup:
vm_backup_access = api.vms.get(VM_THAT_PERFORM_BACKUP)

#Create a new disk and attach it to the VM with access to the backed up data.
# FIX: StorageDomains takes a list, not a set literal -- every other call in
# this file passes `storage_domain=[...]`.
target_storage = params.StorageDomains(storage_domain=[storage_domain])
created_disk = vm_backup_access.disks.add(
    params.Disk(storage_domains=target_storage,
                interface='virtio',
                format='cow',
                provisioned_size=1024 * MB))
# FIX: get_status() returns a Status object, so comparing it to the string
# 'locked' was always False and the wait loop never ran; compare the state
# string instead (same pattern as the other wait loops in this file).
while vm_backup_access.disks.get(
        id=created_disk.get_id()).get_status().get_state() == 'locked':
    sleep(1)

#Restore the data to the disk/disks.
#Detach the disk from the VM with the access to the backup and attach it to the restored VM (see in the backup flow)
# Bye: