def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): if vmdisk_alloc == 'thin': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') elif vmdisk_alloc == 'preallocated': # define VM params vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) # define disk params vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) # define network parameters network_net = params.Network(name=vmnetwork) nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') try: conn.vms.add(vmparams) except: print "Error creating VM with specified parameters" sys.exit(1) vm = conn.vms.get(name=vmname) try: vm.disks.add(vmdisk) except: print "Error attaching disk" try: vm.nics.add(nic_net1) except: print "Error adding nic"
def deploy_template(self, template, *args, **kwargs):
    # Deploy a VM named kwargs['vm_name'] from the given RHEV template, wait
    # for it to settle in the stopped state, then optionally power it on.
    # Returns the VM name.
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, kwargs["vm_name"]))
    timeout = kwargs.pop('timeout', 900)  # seconds to wait for the VM to stop
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': kwargs['vm_name'],
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template)
    }
    # Pin the VM to a host only when BOTH placement kwargs are supplied.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        host = params.Host(name=kwargs['placement_policy_host'])
        policy = params.VmPlacementPolicy(
            host=host, affinity=kwargs['placement_policy_affinity'])
        vm_kwargs['placement_policy'] = policy
    vm = params.VM(**vm_kwargs)
    self.api.vms.add(vm)
    self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
    if power_on:
        version = self.api.get_product_info().get_full_version()
        # Templates with these prefixes apparently need root credentials
        # injected via cloud-init on RHEV 3.4 -- TODO confirm rationale.
        cfme_template = any(
            template.startswith(pfx) for pfx in ["cfme-55", "s_tpl", "sprout_template"])
        if cfme_template and version.startswith("3.4"):
            action = params.Action(vm=params.VM(initialization=params.Initialization(
                cloud_init=params.CloudInit(users=params.Users(
                    user=[params.User(user_name="root", password="******")])))))
            ciargs = {}
            ciargs['initialization'] = action
            self.start_vm(vm_name=kwargs['vm_name'], **ciargs)
        else:
            self.start_vm(vm_name=kwargs['vm_name'])
    return kwargs['vm_name']
def create_rhevm_instance(instance_name, template_name, datacenter='Default', quota='admin', cluster='Default', timeout=5):
    """Spin up a RHEVM instance from an existing template.

    The template is expected to already carry its network and storage
    configuration, and the ssh key must be present in the openstack project
    beforehand, otherwise the automation fails.

    Credentials come from the environment (consumed by get_rhevm_client):
    RHEV_USER, RHEV_PASSWD and RHEV_URL.

    :param instance_name: name of the RHEVM instance to create.
    :param template_name: RHEVM template to clone the instance from.
    :param datacenter: datacenter holding the quota.
    :param quota: quota name inside that datacenter.
    :param cluster: cluster to create the instance on.
    :param int timeout: polling timeout in minutes.
    """
    client = get_rhevm_client()
    tmpl = client.templates.get(name=template_name)
    dc = client.datacenters.get(name=datacenter)
    dc_quota = dc.quotas.get(name=quota)
    logger.info('Turning on instance {0} from template {1}. Please wait '
                'till get up ...'.format(instance_name, template_name))
    client.vms.add(params.VM(
        name=instance_name,
        cluster=client.clusters.get(name=cluster),
        template=tmpl,
        quota=dc_quota,
    ))
    if wait_till_rhevm_instance_status(instance_name, 'down', timeout=timeout):
        client.vms.get(name=instance_name).start()
        if wait_till_rhevm_instance_status(instance_name, 'up', timeout=timeout):
            logger.info('Instance {0} is now up !'.format(instance_name))
            # The FQDN is only reachable when the RHEV-agent is installed;
            # templates under the SAT-QE datacenter ship with it.
            if client.datacenters.get(name='SAT-QE'):
                vm_fqdn = client.vms.get(
                    name=instance_name).get_guest_info().get_fqdn()
                logger.info('\t Instance FQDN : %s' % (vm_fqdn))
                # Export the FQDN so CI shell scripts can source it later.
                file_path = "/tmp/rhev_instance.txt"
                with open(file_path, 'w') as handle:
                    handle.write('export SAT_INSTANCE_FQDN={0}'.format(vm_fqdn))
    client.disconnect()
def make_vm_from_template(api, cluster, temp_template_name, temp_vm_name, provider):
    """Create a temporary VM from an imported template.

    The temporary VM is later used to attach a new disk and convert back to
    a template, after which it is deleted.

    Args:
        api: API to chosen RHEVM provider.
        cluster: Cluster to save the temporary VM on.
    """
    # Bail out early if a VM with the target name already exists.
    if api.vms.get(temp_vm_name) is not None:
        print("RHEVM:{} Warning: found another VM with this name.".format(provider))
        print("RHEVM:{} Skipping this step, attempting to continue...".format(provider))
        return
    source_template = api.templates.get(temp_template_name)
    target_cluster = api.clusters.get(cluster)
    api.vms.add(params.VM(name=temp_vm_name,
                          template=source_template,
                          cluster=target_cluster))

    # The VM must reach the 'down' state before it is usable.
    def check_status():
        return api.vms.get(temp_vm_name).get_status().state == 'down'

    wait_for(check_status, fail_condition=False, delay=5)
    # Sanity check: make sure the VM is really there.
    if not api.vms.get(temp_vm_name):
        print("RHEVM:{} temp VM could not be provisioned".format(provider))
        sys.exit(127)
    print("RHEVM:{} successfully provisioned temp vm".format(provider))
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None, domain=None, dns=None, rootpw=None, key=None):
    """Start a VM, optionally pushing cloud-init guest configuration.

    Args:
        conn: ovirtsdk API connection.
        vmname: name of the VM to start.
        hostname/ip/netmask/gateway/domain/dns/rootpw/key: optional guest
            settings; supplying any of them turns cloud-init on.

    A static NIC configuration is only built when ip, netmask AND gateway
    are all given.
    """
    vm = conn.vms.get(name=vmname)
    # Cloud-init engages as soon as any guest setting is supplied.
    use_cloud_init = any((hostname, ip, netmask, gateway, domain, dns, rootpw, key))
    nics = None
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC',
                                           ip=ipinfo, on_boot=True)
        # BUGFIX: removed the dead "nics = params.Nics()" assignment that was
        # immediately overwritten by the line below.
        nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization = params.Initialization(regenerate_ssh_keys=True,
                                           host_name=hostname,
                                           domain=domain,
                                           user_name='root',
                                           root_password=rootpw,
                                           nic_configurations=nics,
                                           dns_servers=dns,
                                           authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init,
                           vm=params.VM(initialization=initialization))
    vm.start(action=action)
def add_vm_template(api):
    """Create VM1 from the CentOS 7 template (currently skipped)."""
    # TODO: Fix the exported domain generation
    raise SkipTest('Exported domain generation not supported yet')
    # NOTE: everything below is unreachable until the SkipTest is removed.
    vm_spec = dict(
        name=VM1_NAME,
        memory=512 * MB,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_CENTOS7),
        display=params.Display(type_='spice'),
    )
    api.vms.add(params.VM(**vm_spec))
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
def add_vm_from_template(self, cluster_name, template_name='Blank', new_name='my_new_vm', timeout=300):
    """
    Create a VM from template.

    :param cluster_name: cluster name.
    :param template_name: default template is 'Blank'.
    :param new_name: 'my_new_vm' is a default new VM's name.
    :param timeout: seconds to wait for the VM to reach the <Down> state.
        New, backward-compatible parameter: the previous loop had no bound
        and could spin forever if the VM never went down.
    """
    end_time = time.time() + timeout
    vm_params = param.VM(name=new_name,
                         cluster=self.api.clusters.get(cluster_name),
                         template=self.api.templates.get(template_name))
    try:
        logging.info('Creating a VM %s from template %s' % (new_name, template_name))
        self.api.vms.add(vm_params)
        logging.info('Waiting for VM to reach <Down> status ...')
        while self.state() != 'down':
            if time.time() > end_time:
                raise Exception('Timed out waiting for VM %s to reach <Down>' % new_name)
            # Refresh the cached instance so state() sees fresh data.
            self.instance = self.api.vms.get(self.name)
            time.sleep(1)
        logging.info('VM was created from template successfully')
    except Exception as e:  # was py2-only "except Exception, e"
        logging.error('Failed to create VM from template:\n%s' % str(e))
def execute(self, args): t = self._api.templates.get(args.template) assert t is not None, "Specified template '%s' not found" % args.template c = self._api.clusters.get(args.cluster) assert c is not None, "Specified cluster '%s' not found" % args.template # Determine VM name to use if args.name is not None: vm_name = args.name else: vm_name = args.template # List of existing VM names vm_names = [vm.name for vm in self._api.vms.list()] # Make sure VM name is unique count = 1 while vm_name in vm_names: vm_name = "%s-%s" % (args.template, count) count += 1 # Create VM print "Creating VM '%s'" % vm_name try: vm = params.VM( name=vm_name, template=t, cluster=c, ) result = self._api.vms.add(vm) except Exception, e: print e
def add_vm_from_template(self, cluster_name, template_name='Blank', new_name='my_new_vm', timeout=300):
    """
    Create a VM from template and wait for it to reach the <Down> state.

    :param cluster_name: cluster name.
    :param template_name: default template is 'Blank'.
    :param new_name: 'my_new_vm' is a default new VM's name.
    :param timeout: Time out
    """
    deadline = time.time() + timeout
    new_vm = param.VM(name=new_name,
                      cluster=self.api.clusters.get(cluster_name),
                      template=self.api.templates.get(template_name))
    try:
        logging.info('Creating a VM %s from template %s' % (new_name, template_name))
        self.api.vms.add(new_vm)
        logging.info('Waiting for VM to reach <Down> status')
        reached_down = False
        while time.time() < deadline:
            if self.is_dead():
                reached_down = True
                break
            time.sleep(1)
        if not reached_down:
            raise WaitVMStateTimeoutError("DOWN", self.state())
        logging.info('VM was created from template successfully')
    except Exception as e:
        logging.error('Failed to create VM from template:\n%s' % str(e))
def vm_run(prefix):
    """Start VM0 pinned to the first host (by sorted name) with cloud-init."""
    api = prefix.virt_env.engine_vm().get_api()
    host_names = sorted(h.name() for h in prefix.virt_env.host_vms())
    placement = params.VmPlacementPolicy(
        host=params.Host(name=host_names[0]),
    )
    init = params.Initialization(
        domain=params.Domain(name='lago.example.com'),
        cloud_init=params.CloudInit(
            host=params.Host(address='VM0'),
        ),
    )
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(placement_policy=placement, initialization=init),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_blank_vms(api):
    """Create VM0, VM2 and the backup VM from the blank template.

    A fresh params.VM is built for each VM. The original reused one mutable
    object across the loop, so after VM2 enabled high availability and set a
    custom emulated machine, BACKUP_VM_NAME (created next) silently inherited
    both settings.
    """
    vm_memory = 256 * MB
    for vm in [VM0_NAME, VM2_NAME, BACKUP_VM_NAME]:
        vm_params = params.VM(
            memory=vm_memory,
            os=params.OperatingSystem(type_='other_linux', ),
            type_='server',
            # Only VM2 is highly available.
            high_availability=params.HighAvailability(enabled=(vm == VM2_NAME), ),
            cluster=params.Cluster(name=TEST_CLUSTER, ),
            template=params.Template(name=TEMPLATE_BLANK, ),
            display=params.Display(
                smartcard_enabled=True,
                keyboard_layout='en-us',
                file_transfer_enabled=True,
                copy_paste_enabled=True,
            ),
            memory_policy=params.MemoryPolicy(guaranteed=vm_memory / 2, ),
            name=vm)
        if vm == VM2_NAME:
            vm_params.custom_emulated_machine = 'pc-i440fx-rhel7.4.0'
        api.vms.add(vm_params)
        testlib.assert_true_within_short(
            lambda: api.vms.get(vm).status.state == 'down',
        )
def create_vm_template(conn, vmname, image, zone): vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True)) try: conn.vms.add(vmparams) except: print 'error adding template %s' % image sys.exit(1)
def add_vm_blank(api):
    """Create VM0 (plain) and VM2 (highly available) from the blank template."""
    vm_memory = 256 * MB
    shared_display = params.Display(
        smartcard_enabled=True,
        keyboard_layout='en-us',
        file_transfer_enabled=True,
        copy_paste_enabled=True,
    )
    vm_params = params.VM(
        memory=vm_memory,
        os=params.OperatingSystem(type_='other_linux'),
        type_='server',
        high_availability=params.HighAvailability(enabled=False),
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_BLANK),
        display=shared_display,
        memory_policy=params.MemoryPolicy(guaranteed=vm_memory / 2),
        name=VM0_NAME)
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
    # Reuse the same parameter object for VM2, flipping on high availability.
    vm_params.name = VM2_NAME
    vm_params.high_availability.enabled = True
    api.vms.add(vm_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM2_NAME).status.state == 'down',
    )
def add_vm_blank(api):
    """Create VM0 (512 MB) from the blank template and wait for 'down'."""
    vm_memory = 512 * MB
    api.vms.add(params.VM(
        name=VM0_NAME,
        memory=vm_memory,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_BLANK),
        display=params.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        memory_policy=params.MemoryPolicy(guaranteed=vm_memory / 2),
    ))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def deploy_template(self, template, *args, **kwargs):
    """Deploy a VM from a RHEV template, wait until stopped, optionally boot it.

    Recognised kwargs: vm_name, cluster, timeout, power_on,
    placement_policy_host + placement_policy_affinity, cpu, ram (MB).
    Returns the VM name.
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, kwargs["vm_name"]))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': kwargs['vm_name'],
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template),
    }
    # Host pinning requires both placement kwargs.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        vm_kwargs['placement_policy'] = params.VmPlacementPolicy(
            host=params.Host(name=kwargs['placement_policy_host']),
            affinity=kwargs['placement_policy_affinity'])
    if 'cpu' in kwargs:
        topology = params.CpuTopology(cores=int(kwargs['cpu']))
        vm_kwargs['cpu'] = params.CPU(topology=topology)
    if 'ram' in kwargs:
        vm_kwargs['memory'] = int(kwargs['ram']) * 1024 * 1024  # MB
    self.api.vms.add(params.VM(**vm_kwargs))
    self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
    if power_on:
        self.start_vm(kwargs['vm_name'])
    return kwargs['vm_name']
def clone_snapshot(api, config, vm_from_list):
    """
    Clone snapshot into a new vm
    :param api: ovirtsdk api
    :param config: Configuration
    :param vm_from_list: name of the VM whose snapshot is cloned
    """
    vm_clone_name = vm_from_list + config.get_vm_middle() + config.get_vm_suffix()
    vm = api.vms.get(vm_from_list)
    snapshots = vm.snapshots.list(description=config.get_snapshot_description())
    if not snapshots:
        logger.error("!!! No snapshot found !!!")
        # NOTE(review): this local assignment likely meant to flag module-level
        # error state -- confirm whether a `global has_errors` is needed.
        has_errors = True
        # BUGFIX: bail out here; the original fell through to snapshots[0]
        # and raised IndexError on an empty snapshot list.
        return
    snapshot = snapshots[0]
    # Find the storage domain where the disks should be created:
    sd = api.storagedomains.get(name=config.get_destination_domain())
    # Find the image identifiers of the disks of the snapshot, as
    # we need them in order to explicitly indicate that we want
    # them created in a different storage domain:
    disk_ids = []
    for current in snapshot.disks.list():
        disk_ids.append(current.get_id())
    # Prepare the list of disks for the operation to create the
    # snapshot, explicitly indicating for each of them the storage
    # domain where it should be created:
    disk_list = []
    for disk_id in disk_ids:
        disk = params.Disk(
            image_id=disk_id,
            storage_domains=params.StorageDomains(
                storage_domain=[
                    params.StorageDomain(
                        id=sd.get_id(),
                    ),
                ],
            ),
        )
        disk_list.append(disk)
    snapshot_param = params.Snapshot(id=snapshot.id)
    snapshots_param = params.Snapshots(snapshot=[snapshot_param])
    logger.info("Clone into VM (%s) started ..." % vm_clone_name)
    if not config.get_dry_run():
        api.vms.add(params.VM(
            name=vm_clone_name,
            memory=vm.get_memory(),
            cluster=api.clusters.get(config.get_cluster_name()),
            snapshots=snapshots_param,
            disks=params.Disks(
                disk=disk_list,
            )
        )
        )
        VMTools.wait_for_vm_operation(api, config, "Cloning", vm_from_list)
    logger.info("Cloning finished")
def vm_run(api):
    """Start VM1 pinned to the first configured host and wait for 'up'."""
    pinned_vm = params.VM(
        placement_policy=params.VmPlacementPolicy(
            host=params.Host(name=HOSTS[0]),
        ),
    )
    api.vms.get(VM1_NAME).start(params.Action(vm=pinned_vm))
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def add(self, memory, disk_size, cluster_name, storage_name, nic_name='eth0', network_interface='virtio', network_name='ovirtmgmt', disk_interface='virtio', disk_format='raw', template_name='Blank'):
    """
    Create VM with one NIC and one Disk.

    @memory: VM's memory size such as 1024*1024*1024=1GB.
    @disk_size: VM's disk size such as 512*1024=512MB.
    @nic_name: VM's NICs name such as 'eth0'.
    @network_interface: VM's network interface such as 'virtio'.
    @network_name: network such as ovirtmgmt for ovirt, rhevm for rhel.
    @disk_format: VM's disk format such as 'raw' or 'cow'.
    @disk_interface: VM's disk interface such as 'virtio'.
    @cluster_name: cluster name.
    @storage_name: storage domain name.
    @template_name: VM's template name, default is 'Blank'.
    """
    # network name is ovirtmgmt for ovirt, rhevm for rhel.
    vm_params = param.VM(name=self.name, memory=memory,
                         cluster=self.api.clusters.get(cluster_name),
                         template=self.api.templates.get(template_name))
    storage = self.api.storagedomains.get(storage_name)
    storage_params = param.StorageDomains(storage_domain=[storage])
    nic_params = param.NIC(name=nic_name,
                           network=param.Network(name=network_name),
                           interface=network_interface)
    disk_params = param.Disk(storage_domains=storage_params,
                             size=disk_size,
                             type_='system',
                             status=None,
                             interface=disk_interface,
                             format=disk_format,
                             sparse=True,
                             bootable=True)
    try:
        logging.info('Creating a VM %s' % self.name)
        self.api.vms.add(vm_params)
        logging.info('NIC is added to VM %s' % self.name)
        self.instance.nics.add(nic_params)
        logging.info('Disk is added to VM %s' % self.name)
        self.instance.disks.add(disk_params)
        logging.info('Waiting for VM to reach <Down> status ...')
        # NOTE(review): no deadline here -- this can spin forever if the VM
        # never reaches 'down'; consider a timeout like the variant above.
        while self.state() != 'down':
            time.sleep(1)
    except Exception as e:  # BUGFIX: was py2-only "except Exception, e"
        logging.error('Failed to create VM with disk and NIC\n%s' % str(e))
def vm_run(prefix):
    """Start VM0 pinned to the second host (by sorted name) and wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    action = params.Action(vm=params.VM(
        placement_policy=params.VmPlacementPolicy(
            host=params.Host(name=hosts[1]),
        ),
    ))
    api.vms.get(VM0_NAME).start(action)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_vm_blank(api):
    """Create VM0 (1 GB RAM, spice display) from the blank template."""
    blank_vm = params.VM(
        name=VM0_NAME,
        memory=1 * GB,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_BLANK),
        display=params.Display(type_='spice'),
    )
    api.vms.add(blank_vm)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'down',
    )
def deploy_template(self, template, *args, **kwargs):
    """Create a VM from a template, wait for it to settle, then boot it.

    Requires kwargs 'vm_name' and 'cluster_name'; returns the VM name.
    """
    vm_name = kwargs['vm_name']
    self.api.vms.add(params.VM(
        name=vm_name,
        cluster=self.api.clusters.get(kwargs['cluster_name']),
        template=self.api.templates.get(template)))
    # A freshly created VM first settles into the 'down' state.
    while self.api.vms.get(vm_name).status.state != 'down':
        time.sleep(5)
    self.start_vm(vm_name)
    while not self.is_vm_running(vm_name):
        time.sleep(5)
    return vm_name
def make_vm_from_template(api, cluster, temp_template_name, temp_vm_name,
                          provider, mgmt_network=None):
    """Makes temporary VM from imported template. This template will be later
    deleted. It's used to add a new disk and to convert back to template.

    Args:
        api: API to chosen RHEVM provider.
        cluster: Cluster to save the temporary VM on.
        mgmt_network: management network on RHEVM box, its 'ovirtmgmt' by default
            on rhv4.0 and 'rhevm' on older RHEVM versions.
        temp_template_name: temporary template name created from ova
        temp_vm_name: temporary vm name to be created.
        provider: provider_key
    """
    # The whole flow is best-effort: any failure is logged and swallowed so
    # the caller can decide how to proceed.
    try:
        # Skip creation entirely if a VM with this name already exists.
        if api.vms.get(temp_vm_name) is not None:
            logger.info(
                "RHEVM:%r Warning: found another VM with this name (%r).",
                provider, temp_vm_name)
            logger.info(
                "RHEVM:%r Skipping this step, attempting to continue...",
                provider)
            return
        actual_template = api.templates.get(temp_template_name)
        actual_cluster = api.clusters.get(cluster)
        params_vm = params.VM(name=temp_vm_name,
                              template=actual_template,
                              cluster=actual_cluster)
        api.vms.add(params_vm)

        # we must wait for the vm do become available
        def check_status():
            return api.vms.get(temp_vm_name).get_status().state == 'down'

        wait_for(check_status, fail_condition=False, delay=5, num_sec=240)
        # Optionally rewire the first NIC onto the management network.
        if mgmt_network:
            vm = api.vms.get(temp_vm_name)
            nic = vm.nics.get('eth0')
            nic.network = params.Network(name=mgmt_network)
            nic.interface = 'virtio'
            nic.update()
        # check, if the vm is really there
        if not api.vms.get(temp_vm_name):
            logger.error("RHEVM:%r temp VM could not be provisioned", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully provisioned temp vm", provider)
    except Exception:
        logger.exception("RHEVM:%r Make_temp_vm_from_template failed:", provider)
def create_vm(self, name, memory=locals.MEMORY, template=locals.TEMPLATE_NAME):
    """Creates a VM from given parameters and returns its hostname."""
    show('VM creation:')
    show.tab()
    show('Name: %s' % name)
    show('Template: %s' % template)
    show('Memory: %s' % memory)
    tmpl = self.api.templates.get(template)
    if not tmpl:
        raise ValueError('Template does not exist: %s' % template)
    # # Check whether the template exist, if so, create the VM
    # if util.get_latest_template(self.api, template) is None:
    #     raise ValueError('Template does not exist: %s' % template)
    # Set VM's parameters as defined in locals.py
    pars = params.VM(name=name,
                     memory=memory,
                     cluster=self.api.clusters.get(self.cluster),
                     template=tmpl)
    # locals.HOST can be used to enforce usage of a particular host
    if locals.HOST:
        pars.set_placement_policy(
            params.VmPlacementPolicy(host=self.api.hosts.get(locals.HOST),
                                     affinity='pinned'))
    vm = self.api.vms.add(pars)
    show('VM was created from Template successfully')
    # Set corret permissions so that VM can be seen in WebAdmin.
    # Without kerberos the VM is created as 'admin', so grant UserVmManager.
    if not self.kerberos:
        admin_vm_manager_perm = params.Permission(
            role=self.api.roles.get('UserVmManager'),
            user=self.api.users.get('admin'))
        vm.permissions.add(admin_vm_manager_perm)
        show('Permissions for admin to see VM set')
    # VM automatically shuts down after creation; poll every 15s until it
    # reaches 'down' (no upper bound -- assumes creation always completes).
    show('Waiting for VM to reach Down status')
    while self.get_vm_state(name, vm) != 'down':
        vm = self.get_vm(name)
        sleep(15)
    show.untab()
    # NOTE(review): docstring says "returns its hostname" but a VM object is
    # returned -- confirm which the callers expect.
    return vm
def deployFromTemplate(self, name, comments, templateId, clusterId, displayType, usbType, memoryMB, guaranteedMB):
    '''
    Deploys a virtual machine on selected cluster from selected template

    Args:
        name: Name (sanitized) of the machine
        comments: Comments for machine
        templateId: Id of the template to deploy from
        clusterId: Id of the cluster to deploy to
        displayType: 'vnc' or 'spice'. Display to use ad oVirt admin interface
        usbType: 'native' or 'legacy' to enable USB passthrough; anything
            else disables USB
        memoryMB: Memory requested for machine, in MB
        guaranteedMB: Minimum memory guaranteed for this machine

    Returns:
        Id of the machine being created form template
    '''
    logger.debug(
        'Deploying machine with name "{0}" from template {1} at cluster {2} with display {3} and usb {4}, memory {5} and guaranteed {6}'
        .format(name, templateId, clusterId, displayType, usbType, memoryMB, guaranteedMB))
    try:
        # Serialize all API access; released in the finally block below.
        lock.acquire(True)
        api = self.__getApi()
        logger.debug('Deploying machine {0}'.format(name))
        cluster = params.Cluster(id=clusterId)
        template = params.Template(id=templateId)
        display = params.Display(type_=displayType)
        # USB is only enabled for the two supported passthrough types.
        if usbType in ('native', 'legacy'):
            usb = params.Usb(enabled=True, type_=usbType)
        else:
            usb = params.Usb(enabled=False)
        # Both memory figures are given in MB; oVirt wants bytes.
        memoryPolicy = params.MemoryPolicy(guaranteed=guaranteedMB * 1024 * 1024)
        par = params.VM(name=name, cluster=cluster,
                        template=template, description=comments,
                        type_='desktop', memory=memoryMB * 1024 * 1024,
                        memory_policy=memoryPolicy, usb=usb)  # display=display,
        # NOTE(review): `display` is built but not passed to params.VM
        # (commented out above) -- confirm whether that is intentional.
        return api.vms.add(par).get_id()
    finally:
        lock.release()
def _vm_args_to_params(**vm_args):  # noqa - ignore mccabe warning
    """
    Convert fabric-style simple arguments into an oVirt VM parameters structure

    All parameters are as defined in the 'create' task for customizing the
    pool VMs

    :returns: an oVirt VM parameters structure or None if no customization was
              requested
    :rtype: oVirtObjects.VM
    """
    vm_args_supported = (
        'custom_serial_number',
        'memory',
        'memory_guaranteed',
        'memory_balooning',
        'vcpus',
    )
    # Keep only supported, non-None arguments.
    vm_args = dict(
        (key, value) for key, value in vm_args.iteritems()
        if key in vm_args_supported and value is not None
    )
    if not vm_args:
        return None
    vm_params = oVirtParams.VM()
    memory = None
    if 'memory' in vm_args:
        memory = int(vm_args['memory'])
        vm_params.memory = memory
    mem_policy = None
    if 'memory_guaranteed' in vm_args or 'memory_balooning' in vm_args:
        mem_policy = oVirtParams.MemoryPolicy()
        if 'memory_guaranteed' in vm_args:
            mem_policy.guaranteed = int(vm_args['memory_guaranteed'])
        if 'memory_balooning' in vm_args:
            # BUGFIX: was vm_args['balooning'] -- that key can never exist in
            # the filtered dict, so enabling ballooning raised KeyError.
            mem_policy.ballooning = bool(vm_args['memory_balooning'])
    # oVirt sets guaranteed to 1G by default so we need to set it for smaller
    # VMs. This is a work-around for oVirt BZ#1333369
    if memory and memory < 1 * GiB:
        if mem_policy is None:
            mem_policy = oVirtParams.MemoryPolicy(guaranteed=memory)
        elif mem_policy.guaranteed is None:
            mem_policy.guaranteed = memory
    vm_params.memory_policy = mem_policy
    if 'vcpus' in vm_args:
        vm_params.cpu = oVirtParams.CPU(topology=oVirtParams.CpuTopology(
            sockets=int(vm_args['vcpus'])))
    if 'custom_serial_number' in vm_args:
        vm_params.serial_number = oVirtParams.SerialNumber(
            policy='custom',
            value=vm_args['custom_serial_number'],
        )
    return vm_params
def vm_run(prefix):
    # Start VM0 pinned to the first host (by sorted name), pushing a full
    # cloud-init payload: hostname, root credentials and a static eth0.
    api = prefix.virt_env.engine_vm().get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(
                    name=sorted(host_names)[0]
                ),
            ),
            initialization=params.Initialization(
                domain=params.Domain(
                    name='lago.example.com'
                ),
                cloud_init=params.CloudInit(
                    host=params.Host(
                        address='VM0'
                    ),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******'
                        )]
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(
                            nic=[params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                # NOTE(review): on_boot is the string 'True',
                                # not a bool -- confirm the SDK accepts this.
                                on_boot='True',
                                network=params.Network(
                                    ip=params.IP(
                                        # NOTE(review): trailing dot in the
                                        # address looks like a typo -- confirm
                                        # '192.168.1.2' was intended.
                                        address='192.168.1.2.',
                                        netmask='255.255.255.0',
                                        gateway='192.168.1.1',
                                    ),
                                ),
                            )]
                        ),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_vm_template(api):
    """Create VM1 from the Glance-imported CirrOS template and wait for it.

    The exported-domain path is still TODO; until then the CirrOS template
    must already exist (imported from Glance), otherwise the test is skipped.
    """
    # TODO: Fix the exported domain generation.
    # For the time being, add VM from Glance imported template.
    if api.templates.get(name=TEMPLATE_CIRROS) is None:
        raise SkipTest('%s: template %s not available.' % (add_vm_template.__name__, TEMPLATE_CIRROS))
    vm_memory = 512 * MB
    mem_policy = params.MemoryPolicy(
        guaranteed=vm_memory / 2,
        ballooning=False,
    )
    cpu_spec = params.CPU(
        architecture='X86_64',
        topology=params.CpuTopology(cores=1, threads=2, sockets=1),
    )
    serial = params.SerialNumber(policy='custom', value='12345678')
    vm_params = params.VM(
        name=VM1_NAME,
        description='CirrOS imported from Glance as Template',
        memory=vm_memory,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_CIRROS),
        display=params.Display(type_='vnc'),
        memory_policy=mem_policy,
        os=params.OperatingSystem(type_='other_linux'),
        timezone='Etc/GMT',
        type_='server',
        serial_number=serial,
        cpu=cpu_spec,
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
def add_vm_template(api):
    """Create VM1 (4 GB, spice) from the CentOS 7 template; wait for its disk."""
    api.vms.add(params.VM(
        name=VM1_NAME,
        memory=4 * GB,
        cluster=params.Cluster(name=TEST_CLUSTER),
        template=params.Template(name=TEMPLATE_CENTOS7),
        display=params.Display(type_='spice'),
    ))
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
def _addParams(self, vmParamsQueue, vmNum):
    """Queue creation parameters for vmNum new VMs.

    Names continue the numbering after the VMs that already exist
    (self.vmName + index), so new names never collide with old ones.
    """
    vm_cluster = self.api.clusters.get(name=self.cluster)
    vm_template = self.api.templates.get(name=self.template)
    existing = len(self._getVmList())
    for offset in range(vmNum):
        vm_name = self.vmName + str(existing + offset + 1)
        vmParamsQueue.put({
            'vm_name': vm_name,
            'paramsVM': params.VM(name=vm_name,
                                  cluster=vm_cluster,
                                  template=vm_template),
        })
def CreateVm(vm_name, vm_type, vm_mem, vm_cluster, vm_template): vm_params = params.VM( name=vm_name, memory=vm_mem * MB, cluster=api.clusters.get(name=vm_cluster), template=api.templates.get(name=vm_template), os=params.OperatingSystem(boot=[params.Boot(dev="hd")])) vm_params.set_type(vm_type) try: api.vms.add(vm=vm_params) print "Virtual machine '%s' added." % vm_name except Exception as ex: print "Adding virtual machine '%s' failed: %s" % (vm_name, ex)