def vm_run(prefix):
    """Start VM0 with cloud-init, pinned to the alphabetically-first host,
    and wait (long timeout) until it reports 'up'.
    """
    vm_api = prefix.virt_env.engine_vm().get_api()
    hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    action = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Pinning to a fixed host keeps the test run deterministic.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=hosts[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                ),
            ),
        ),
    )
    vm_api.vms.get(VM0_NAME).start(action)
    testlib.assert_true_within_long(
        lambda: vm_api.vms.get(VM0_NAME).status.state == 'up',
    )
def deploy_template(self, template, *args, **kwargs):
    """Clone *template* into a new VM, optionally pin it to a host, wait for
    it to stop, then power it on (injecting a cloud-init root user for cfme
    templates on RHEV 3.4).

    Returns the name of the created VM.
    """
    vm_name = kwargs['vm_name']
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, vm_name))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': vm_name,
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template),
    }
    # Pin the VM only when BOTH placement keys were supplied.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        vm_kwargs['placement_policy'] = params.VmPlacementPolicy(
            host=params.Host(name=kwargs['placement_policy_host']),
            affinity=kwargs['placement_policy_affinity'],
        )
    self.api.vms.add(params.VM(**vm_kwargs))
    self.wait_vm_stopped(vm_name, num_sec=timeout)
    if power_on:
        version = self.api.get_product_info().get_full_version()
        # str.startswith accepts a tuple of prefixes.
        cfme_template = template.startswith(("cfme-55", "s_tpl", "sprout_template"))
        if cfme_template and version.startswith("3.4"):
            # Seed a root account via cloud-init on first boot.
            users = params.Users(
                user=[params.User(user_name="root", password="******")])
            action = params.Action(vm=params.VM(
                initialization=params.Initialization(
                    cloud_init=params.CloudInit(users=users))))
            self.start_vm(vm_name=vm_name, initialization=action)
        else:
            self.start_vm(vm_name=vm_name)
    return vm_name
def deploy_template(self, template, *args, **kwargs):
    """Deploy a RHEV template to a new VM with optional host pinning and
    CPU/RAM overrides; wait for it to stop, then optionally power it on.

    Returns the name of the created VM.
    """
    vm_name = kwargs['vm_name']
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, vm_name))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = dict(
        name=vm_name,
        cluster=self.api.clusters.get(kwargs['cluster']),
        template=self.api.templates.get(template),
    )
    # Pin the VM only when BOTH placement keys were supplied.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        vm_kwargs['placement_policy'] = params.VmPlacementPolicy(
            host=params.Host(name=kwargs['placement_policy_host']),
            affinity=kwargs['placement_policy_affinity'],
        )
    if 'cpu' in kwargs:
        topology = params.CpuTopology(cores=int(kwargs['cpu']))
        vm_kwargs['cpu'] = params.CPU(topology=topology)
    if 'ram' in kwargs:
        # 'ram' is given in MB; the API expects bytes.
        vm_kwargs['memory'] = int(kwargs['ram']) * 1024 * 1024
    self.api.vms.add(params.VM(**vm_kwargs))
    self.wait_vm_stopped(vm_name, num_sec=timeout)
    if power_on:
        self.start_vm(vm_name)
    return vm_name
def vm_run(api):
    """Start VM1 pinned to the first host and wait (short timeout) for 'up'."""
    pinned = params.Host(name=HOSTS[0])
    action = params.Action(
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(host=pinned),
        ),
    )
    api.vms.get(VM1_NAME).start(action)
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def vm_run(prefix):
    """Start VM0 pinned to the SECOND host (sorted by name) and wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    names = sorted(h.name() for h in prefix.virt_env.host_vms())
    action = params.Action(
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=names[1]),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(action)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def vm_run(prefix):
    """Start VM0 with cloud-init (FQDN, root user, static IP on eth0),
    pinned to the alphabetically-first host, and wait for it to come up.
    """
    api = prefix.virt_env.engine_vm().get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Pin to a fixed host so the test run is deterministic.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted(host_names)[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******',
                        )],
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(
                            nic=[params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                # Fixed: was the string 'True'; pass a real
                                # boolean, matching the sibling run_vms().
                                on_boot=True,
                                network=params.Network(
                                    ip=params.IP(
                                        # Fixed: address had a trailing dot
                                        # ('192.168.1.2.') — not valid IPv4.
                                        address='192.168.1.2',
                                        netmask='255.255.255.0',
                                        gateway='192.168.1.1',
                                    ),
                                ),
                            )],
                        ),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def create_vm(self, name, memory=locals.MEMORY, template=locals.TEMPLATE_NAME):
    """Creates a VM from the given template and returns the VM object once
    it has reached the Down state.

    Args:
        name: name for the new VM.
        memory: memory size for the VM (defaults to locals.MEMORY).
        template: template to clone from (defaults to locals.TEMPLATE_NAME).

    Returns:
        The created VM object (NOT its hostname — the original docstring
        was wrong about this).

    Raises:
        ValueError: if the template does not exist.
    """
    show('VM creation:')
    show.tab()
    show('Name: %s' % name)
    show('Template: %s' % template)
    show('Memory: %s' % memory)
    tmpl = self.api.templates.get(template)
    if not tmpl:
        raise ValueError('Template does not exist: %s' % template)
    # Set VM's parameters as defined in locals.py
    pars = params.VM(name=name,
                     memory=memory,
                     cluster=self.api.clusters.get(self.cluster),
                     template=tmpl)
    # locals.HOST can be used to enforce usage of a particular host
    if locals.HOST:
        pars.set_placement_policy(
            params.VmPlacementPolicy(host=self.api.hosts.get(locals.HOST),
                                     affinity='pinned'))
    vm = self.api.vms.add(pars)
    show('VM was created from Template successfully')
    # Set correct permissions so that VM can be seen in WebAdmin
    if not self.kerberos:
        admin_vm_manager_perm = params.Permission(
            role=self.api.roles.get('UserVmManager'),
            user=self.api.users.get('admin'))
        vm.permissions.add(admin_vm_manager_perm)
        show('Permissions for admin to see VM set')
    # VM automatically shuts down after creation; poll until it is Down.
    # NOTE(review): this loop has no upper bound — consider adding a timeout.
    show('Waiting for VM to reach Down status')
    while self.get_vm_state(name, vm) != 'down':
        vm = self.get_vm(name)
        sleep(15)
    show.untab()
    return vm
def run_vms(prefix):
    """Start VM0, the backup VM and VM2 with cloud-init and wait for VM0 and
    the backup VM to come up.

    NOTE: ``start_params`` is a single shared Action that is mutated between
    ``start()`` calls — statement order in this function matters.
    """
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    # Derive a static IP (.199) and gateway (.1) from the engine's own /24.
    vm_ip = '.'.join(engine.ip().split('.')[0:3] + ['199'])
    vm_gw = '.'.join(engine.ip().split('.')[0:3] + ['1'])
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Pin to the alphabetically-first host for determinism.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted(host_names)[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                    users=params.Users(
                        active=True,
                        user=[
                            params.User(user_name='root', password='******')
                        ],
                    ),
                    # Static IPv4 configuration for eth0.
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(nic=[
                            params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot=True,
                                network=params.Network(ip=params.IP(
                                    address=vm_ip,
                                    netmask='255.255.255.0',
                                    gateway=vm_gw,
                                ),
                                ),
                            )
                        ]),
                    ),
                ),
            ),
        ),
    )
    # VM0 and the backup VM are started with the exact same parameters.
    api.vms.get(VM0_NAME).start(start_params)
    api.vms.get(BACKUP_VM_NAME).start(start_params)
    # Reuse the same Action for VM2 with only the cloud-init hostname
    # replaced — this intentionally drops the users/network config for VM2.
    start_params.vm.initialization.cloud_init = params.CloudInit(
        host=params.Host(address='VM2'),
    )
    api.vms.get(VM2_NAME).start(start_params)
    # Only VM0 and the backup VM are awaited; VM2's state is not checked here.
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up' and api.vms.get(
            BACKUP_VM_NAME).status.state == 'up',
    )
def prepare_rhevm_template():
    """Gather appliance settings and resolved API objects into one dict used
    to drive RHEV-M template preparation.
    """
    host_obj = api.hosts.get(appliance['host'])
    cfg = {
        'template_disks': params.Disks(clone=appliance['clone_template']),
        'cluster_object': api.clusters.get(name=appliance['cluster']),
        'host_object': host_obj,
        'migrate': appliance['migrate'],
        # Copy the NIC list so later mutations don't touch the config.
        'appliance_nics': list(appliance['NICS']),
        'appliance_memory': appliance['memory_size'],
        'appliance_type': appliance['vm_type'],
        'num_cores': appliance['cores'],
        'num_cpus': appliance['cpus'],
        'storage_name': appliance['disk_location'],
        'disks': appliance['disks'],
    }
    topology = params.CpuTopology(cores=cfg['num_cores'],
                                  threads=cfg['num_cpus'])
    cfg['cpu_topology'] = topology
    cfg['cpu_object'] = params.CPU(topology=topology)
    cfg['domain_object'] = api.storagedomains.get(name=cfg['storage_name'])
    cfg['actions'] = params.Action(storage_domain=cfg['domain_object'])
    cfg['placement_object'] = params.VmPlacementPolicy(
        host=host_obj, affinity=cfg['migrate'])
    return cfg