def _vm_args_to_params(**vm_args):  # noqa - ignore mccabe warning
    """
    Convert fabric-style simple arguments into an oVirt VM parameters
    structure

    All parameters are as defined in the 'create' task for customizing the
    pool VMs

    :returns: an oVirt VM parameters structure or None if no customization
              was requested
    :rtype: oVirtObjects.VM
    """
    vm_args_supported = (
        'custom_serial_number',
        'memory',
        'memory_guaranteed',
        'memory_balooning',
        'vcpus',
    )
    # Keep only the supported, explicitly-set arguments.
    # (.items() instead of the Py2-only .iteritems(): works on both.)
    vm_args = dict(
        (key, value) for key, value in vm_args.items()
        if key in vm_args_supported and value is not None
    )
    if not vm_args:
        return None
    vm_params = oVirtParams.VM()
    memory = None
    if 'memory' in vm_args:
        memory = int(vm_args['memory'])
        vm_params.memory = memory
    mem_policy = None
    if 'memory_guaranteed' in vm_args or 'memory_balooning' in vm_args:
        mem_policy = oVirtParams.MemoryPolicy()
        if 'memory_guaranteed' in vm_args:
            mem_policy.guaranteed = int(vm_args['memory_guaranteed'])
        if 'memory_balooning' in vm_args:
            # BUGFIX: the filtered dict only ever contains keys from
            # vm_args_supported, so the lookup key is 'memory_balooning'.
            # The original read vm_args['balooning'], which raised a
            # KeyError whenever ballooning customization was requested.
            mem_policy.ballooning = bool(vm_args['memory_balooning'])
    # oVirt sets guaranteed to 1G by default so we need to set it for smaller
    # VMs. This is a work-around for oVirt BZ#1333369
    if memory and memory < 1 * GiB:
        if mem_policy is None:
            mem_policy = oVirtParams.MemoryPolicy(guaranteed=memory)
        elif mem_policy.guaranteed is None:
            mem_policy.guaranteed = memory
    vm_params.memory_policy = mem_policy
    if 'vcpus' in vm_args:
        # 'vcpus' maps to CPU sockets (1 core per socket).
        vm_params.cpu = oVirtParams.CPU(topology=oVirtParams.CpuTopology(
            sockets=int(vm_args['vcpus'])))
    if 'custom_serial_number' in vm_args:
        vm_params.serial_number = oVirtParams.SerialNumber(
            policy='custom',
            value=vm_args['custom_serial_number'],
        )
    return vm_params
def add_blank_vms(api):
    # Create three blank server VMs (VM0, VM2, BACKUP) from the blank
    # template and wait for each to reach the 'down' state.
    vm_memory = 256 * MB
    # A single shared parameter object; .name (and, for VM2, the HA flag
    # and emulated machine) is mutated in place per iteration below.
    vm_params = params.VM(
        memory=vm_memory,
        os=params.OperatingSystem(
            type_='other_linux',
        ),
        type_='server',
        high_availability=params.HighAvailability(
            enabled=False,
        ),
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_BLANK,
        ),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=params.MemoryPolicy(
            guaranteed=vm_memory / 2,
        ),
        name=VM0_NAME)
    for vm in [VM0_NAME, VM2_NAME, BACKUP_VM_NAME]:
        vm_params.name = vm
        if vm == VM2_NAME:
            vm_params.high_availability.enabled = True
            vm_params.custom_emulated_machine = 'pc-i440fx-rhel7.4.0'
        # NOTE(review): because vm_params is mutated in place, the HA flag
        # and custom_emulated_machine set for VM2 are still set when
        # BACKUP_VM_NAME is added on the next iteration — confirm that
        # BACKUP is really meant to inherit VM2's settings.
        api.vms.add(vm_params)
        # The lambda is evaluated within this iteration, so capturing the
        # loop variable 'vm' here is safe.
        testlib.assert_true_within_short(
            lambda: api.vms.get(vm).status.state == 'down',
        )
def add_vm_blank(api):
    """Create two blank server VMs and wait for each to report 'down'.

    VM0 is added without high availability; VM2 reuses the same parameter
    object with high availability switched on.
    """
    ram = 256 * MB
    spec = params.VM(
        name=VM0_NAME,
        memory=ram,
        os=params.OperatingSystem(type_='other_linux'),
        type_='server',
        high_availability=params.HighAvailability(enabled=False),
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_BLANK),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=params.MemoryPolicy(guaranteed=ram / 2),
    )

    api.vms.add(spec)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )

    # Second VM: same spec, but highly available.
    spec.name = VM2_NAME
    spec.high_availability.enabled = True
    api.vms.add(spec)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM2_NAME).status.state == 'down',
    )
def add_vm_blank(api):
    """Create VM0 from the blank template and wait until it is 'down'."""
    mem = 512 * MB
    display_cfg = params.Display(
        smartcard_enabled=True,
        keyboard_layout='en-us',
        file_transfer_enabled=True,
        copy_paste_enabled=True,
    )
    api.vms.add(
        params.VM(
            name=VM0_NAME,
            memory=mem,
            cluster=params.Cluster(name=TEST_CLUSTER),
            template=params.Template(name=TEMPLATE_BLANK),
            display=display_cfg,
            # Guarantee only half the configured memory.
            memory_policy=params.MemoryPolicy(guaranteed=mem / 2),
        )
    )
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def add_vm_blank(api):
    """Create VM0 (blank template, SPICE console) and wait for 'down'."""
    mem = 512 * MB
    blank_vm = params.VM(
        name=VM0_NAME,
        memory=mem,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_BLANK),
        display=params.Display(type_='spice'),
        # Guarantee only half the configured memory.
        memory_policy=params.MemoryPolicy(guaranteed=mem / 2),
    )
    api.vms.add(blank_vm)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def add_cluster(api):
    """Add CLUSTER_NAME to DC_NAME with 100% memory overcommit."""
    cluster_def = params.Cluster(
        name=CLUSTER_NAME,
        cpu=params.CPU(id=CLUSTER_CPU_FAMILY),
        version=params.Version(
            major=DC_VER_MAJ,
            minor=DC_VER_MIN,
        ),
        data_center=params.DataCenter(name=DC_NAME),
        memory_policy=params.MemoryPolicy(
            overcommit=params.MemoryOverCommit(percent=100),
        ),
    )
    nt.assert_true(api.clusters.add(cluster_def))
def deployFromTemplate(self, name, comments, templateId, clusterId, displayType, usbType, memoryMB, guaranteedMB):
    '''
    Deploys a virtual machine on selected cluster from selected template

    Args:
        name: Name (sanitized) of the machine
        comments: Comments for machine
        templateId: Id of the template to deploy from
        clusterId: Id of the cluster to deploy to
        displayType: 'vnc' or 'spice'. Display to use as oVirt admin interface
        usbType: 'native', 'legacy' or anything else to disable USB
        memoryMB: Memory requested for machine, in MB
        guaranteedMB: Minimum memory guaranteed for this machine

    Returns:
        Id of the machine being created from template
    '''
    logger.debug(
        'Deploying machine with name "{0}" from template {1} at cluster {2} with display {3} and usb {4}, memory {5} and guaranteed {6}'
        .format(name, templateId, clusterId, displayType, usbType,
                memoryMB, guaranteedMB))
    try:
        # Serialize access to the oVirt API.
        lock.acquire(True)
        api = self.__getApi()
        logger.debug('Deploying machine {0}'.format(name))

        # Build the individual parameter objects for the new VM.
        clusterRef = params.Cluster(id=clusterId)
        templateRef = params.Template(id=templateId)
        # Kept for the commented-out display kwarg below, as in the
        # original implementation.
        display = params.Display(type_=displayType)
        if usbType in ('native', 'legacy'):
            usb = params.Usb(enabled=True, type_=usbType)
        else:
            usb = params.Usb(enabled=False)
        memoryPolicy = params.MemoryPolicy(
            guaranteed=guaranteedMB * 1024 * 1024)

        vmSpec = params.VM(
            name=name,
            cluster=clusterRef,
            template=templateRef,
            description=comments,
            type_='desktop',
            memory=memoryMB * 1024 * 1024,
            memory_policy=memoryPolicy,
            usb=usb,
        )  # display=display,

        return api.vms.add(vmSpec).get_id()
    finally:
        lock.release()
def add_vm_template(api):
    """Create VM1 (CirrOS) from the Glance-imported template.

    Skips the test when the template is not available, then waits for the
    VM and its first disk to become ready.
    """
    # TODO: Fix the exported domain generation.
    # For the time being, add VM from Glance imported template.
    if api.templates.get(name=TEMPLATE_CIRROS) is None:
        raise SkipTest('%s: template %s not available.' % (add_vm_template.__name__, TEMPLATE_CIRROS))

    ram = 512 * MB
    api.vms.add(params.VM(
        name=VM1_NAME,
        description='CirrOS imported from Glance as Template',
        memory=ram,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_CIRROS),
        display=params.Display(type_='vnc'),
        # Guarantee half the memory, with ballooning off.
        memory_policy=params.MemoryPolicy(
            guaranteed=ram / 2,
            ballooning=False,
        ),
        os=params.OperatingSystem(type_='other_linux'),
        timezone='Etc/GMT',
        type_='server',
        serial_number=params.SerialNumber(
            policy='custom',
            value='12345678',
        ),
        cpu=params.CPU(
            architecture='X86_64',
            topology=params.CpuTopology(
                cores=1,
                threads=2,
                sockets=1,
            ),
        ),
    ))
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )

    # Wait for the template's (single) disk to finish cloning.
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
# NOTE(review): this chunk starts mid-script — the bare sys.exit(1) was
# presumably guarded by a failed template-lookup check just above this
# excerpt; confirm against the full file.
sys.exit(1)
logDebug( "Using template %s" %( templatename ) )
# check if vmname already exist
EXIT_ON = "CHECKVMNAME"
checkVMName(VMNAME)
#now try to create a new vm
try:
    logDebug( "Creating VM %s..." %( VMNAME ) )
    sdesc = "Created by addNewVM.py"
    # 70% memory guaranteed
    mguaranteed = int(MEMORY*GB*0.70)
    # Create the VM from the template; disks are not cloned
    # (clone=False), so they stay on the template's storage domain.
    api.vms.add(params.VM(name=VMNAME,
                          memory=MEMORY*GB,
                          cluster=api.clusters.get(CLUSTER),
                          template=api.templates.get(templatename),
                          description=sdesc,
                          memory_policy=params.MemoryPolicy(guaranteed=mguaranteed),
                          disks=params.Disks(clone=False)
                          ))
    logDebug( "VM %s created, waiting to disk allocation (preallocated disk)" %( VMNAME ) )
    #now wait until is down
    # Poll every SLEEPTIME seconds until the new VM reaches 'down'
    # (i.e. disk allocation has completed).
    vm = api.vms.get(name=VMNAME)
    while ( vm.get_status().state != 'down' ):
        logDebug( "VM %s is on state %s, sleeping %s seconds" %( vm.get_name(), vm.get_status().state, str( SLEEPTIME ) ) )
        sleep(SLEEPTIME)
        vm = api.vms.get(name=VMNAME)
# Python 2-only except syntax; log the failure at level 2 and fall through.
except Exception, err:
    logDebug( "Error on creating a new vm %s" %( VMNAME ), 2 )
    logDebug( Exception, 2)
    logDebug( err, 2)