def remove_disk(vm_id, disk_id=None, disk_name=None, erase='no', ovirt=None): """ Remove the specified disk from the specified VM :param str vm_id: The ID of the VM to add the disk to :param str disk_id: The Id of the disk to remove :param str disk_name`: The name of the disk to remove :param str erase: 'yes' to erase the removed disks from the system, anything else to leave them detached. default is 'no' :param oVirtApi ovirt: An open oVirt API connection One of disk_id or disk_name must be specified, if both are specified, disk_id will be used """ vm = ovirt.vms.get(id=vm_id) if vm is None: abort("VM with specified ID '{0}' not found".format(vm_id)) if disk_id is not None: disk = vm.disks.get(id=disk_id) elif disk_name is not None: disk = vm.disks.get(name=disk_name) else: abort('Niether disk_id nor disk_name specified') if not disk: abort("Disk with specified ID or name not found") if disk.active: puts("Deactivating disk: {0}".format(disk.name)) disk.deactivate(oVirtParams.Action(async=False)) if erase == 'yes': puts("Erasing disk: {0}".format(disk.name)) disk.delete(oVirtParams.Action(detach=False, async=False)) else: puts("Detaching disk: {0}".format(disk.name)) disk.delete(oVirtParams.Action(detach=True, async=False))
def export_vm(self, new_name, export, collapse):
    """
    Export the VM *new_name* to the *export* storage domain.

    :param new_name: name of the VM to export
    :param export: export storage domain (passed through to the SDK)
    :param collapse: 'True' to discard snapshots (collapsed export),
        'False' to keep them; any other value silently does nothing
        (behavior preserved from the original implementation)
    :raises Exception: Exception(14) after printing the underlying error
    """
    try:
        if collapse == 'False':
            action = params.Action(storage_domain=export, force=True)
        elif collapse == 'True':
            action = params.Action(storage_domain=export, force=True,
                                   discard_snapshots=True)
        else:
            # unrecognized value: do nothing, as before
            return
        self.api.vms.get(name=new_name).export(action)
        self.__wait(new_name, 1)
    except Exception as e:
        # e.message was removed in Python 3 and would raise AttributeError,
        # masking the real failure; print the exception itself instead.
        print(e)
        raise Exception(14)
def import_from_export_domain(self, export_name, storage_name, cluster_name, timeout=300):
    """
    Import a VM from export domain to data domain.

    :param export_name: Export domain name.
    :param storage_name: Storage domain name.
    :param cluster_name: Cluster name.
    :param timeout: Seconds to wait for the imported VM to reach <Down>.
    :raises WaitVMStateTimeoutError: if the VM is not down within *timeout*.
    """
    end_time = time.time() + timeout
    vm = self.lookup_by_storagedomains(export_name)
    storage_domains = self.api.storagedomains.get(storage_name)
    clusters = self.api.clusters.get(cluster_name)
    logging.info('Import VM %s' % self.name)
    # NOTE(review): 'param' here differs from the 'params' alias used in
    # similar SDK code — confirm the module is really imported as 'param'.
    vm.import_vm(
        param.Action(storage_domain=storage_domains, cluster=clusters))
    logging.info('Waiting for VM to reach <Down> status')
    vm_down = False
    # Poll once a second until the VM appears in the inventory and is down
    while time.time() < end_time:
        if self.name in self.list():
            if self.is_dead():
                vm_down = True
                break
        time.sleep(1)
    if not vm_down:
        raise WaitVMStateTimeoutError("DOWN", self.state())
    logging.info('Import %s successfully', self.name)
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
             domain=None, dns=None, rootpw=None, key=None):
    """
    Start a VM, optionally pushing guest configuration via cloud-init.

    :param conn: open oVirt API connection
    :param vmname: name of the VM to start
    :param hostname/ip/netmask/gateway/domain/dns/rootpw/key: optional guest
        settings; supplying any of them enables cloud-init for this boot.
        A static NIC configuration is built only when ip, netmask AND
        gateway are all given.
    """
    vm = conn.vms.get(name=vmname)
    use_cloud_init = False
    nics = None
    nic = None
    if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
        use_cloud_init = True
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC',
                                           ip=ipinfo, on_boot=True)
        # The previous `nics = params.Nics()` assignment was dead code
        # (immediately overwritten) and has been removed.
        nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization = params.Initialization(regenerate_ssh_keys=True,
                                           host_name=hostname,
                                           domain=domain,
                                           user_name='root',
                                           root_password=rootpw,
                                           nic_configurations=nics,
                                           dns_servers=dns,
                                           authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init,
                           vm=params.VM(initialization=initialization))
    vm.start(action=action)
def generic_import_from_glance(api, image_name=CIRROS_IMAGE_NAME,
                               as_template=False, image_ext='_glance_disk',
                               template_ext='_glance_template',
                               dest_storage_domain=MASTER_SD_TYPE,
                               dest_cluster=CLUSTER_NAME):
    """Import an image from the Glance storage-domain provider into a data
    domain (optionally as a template) and wait for the disk to become 'ok'."""
    provider = api.storagedomains.get(SD_GLANCE_NAME)
    image = provider.images.get(name=image_name)
    base_name = image_name.replace(" ", "_")
    disk_name = base_name + image_ext
    template_name = base_name + template_ext
    import_action = params.Action(
        storage_domain=params.StorageDomain(name=dest_storage_domain),
        cluster=params.Cluster(name=dest_cluster),
        import_as_template=as_template,
        disk=params.Disk(name=disk_name),
        template=params.Template(name=template_name),
    )
    nt.assert_true(image.import_image(import_action))
    testlib.assert_true_within_long(
        lambda: api.disks.get(disk_name).status.state == 'ok',
    )
def vm_run(prefix):
    """Boot VM0 on the alphabetically-first host with a minimal cloud-init
    payload and wait until the engine reports it 'up'."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    first_host = sorted(host_names)[0]
    placement = params.VmPlacementPolicy(host=params.Host(name=first_host))
    initialization = params.Initialization(
        domain=params.Domain(name='lago.example.com'),
        cloud_init=params.CloudInit(host=params.Host(address='VM0')),
    )
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(placement_policy=placement,
                     initialization=initialization),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def attach_detach_disk(vm, disk, new_disk):
    """Attach the Cinder-backed copy of a volume to the VM, then detach the
    original NFS-backed volume from it."""
    attach_msg = "[{}] Attaching the '{}' Cinder volume to the VM..."
    print(attach_msg.format(vm.name, disk.name))
    vm.disks.add(params.Disk(id=new_disk.id, active=True))
    detach_msg = "[{}] Detaching the '{}' NFS volume from the VM..."
    print(detach_msg.format(vm.name, disk.name))
    disk.delete(action=params.Action(detach=True))
def execute_import(self):
    """
    Import the copied-in template from the export domain into the master
    storage domain of the datacenter that owns our export domain, wait for
    the import to complete, then delete the export-domain files.

    :raises Exception: if no master storage domain exists, the template is
        missing from the export domain, or the import times out / ends in
        an unexpected state.
    """
    # We import to the master storage domain of the datacenter of which our
    # export domain is a member. Got it?
    action = params.Action()
    for sd in self.dc_object.storagedomains.list():
        if sd.get_master():
            action.storage_domain = sd
            break  # only one master per datacenter; stop at the first hit
    if not action.storage_domain:
        raise Exception("Could not find master storage domain for datacenter ID (%s)" % (self.dc_object.get_id()))
    action.cluster = self.cluster_object
    # At this point our freshly copied in files are discoverable via the
    # tpl_uuid in our export domain
    template = self.storage_domain_object.templates.get(id=str(self.tpl_uuid))
    if not template:
        # Previously a missing template fell through and crashed further
        # down with an attribute error; fail fast with a clear message.
        raise Exception("Template (%s) not found in export domain" % (self.tpl_uuid))
    template.import_template(action=action)
    real_template = self.api.templates.get(id=str(self.tpl_uuid))
    # Wait 5 minutes for an import to finish
    self.log.debug("Waiting for template import to complete")
    for i in range(30):
        self.log.debug("Waited %d - state (%s)" % (i * 10, real_template.get_status().get_state()))
        if real_template.get_status().get_state() != 'locked':
            break
        real_template = real_template.update()
        sleep(10)
    self.log.debug("Deleting export domain files")
    self.remove_export_template()
    final_state = real_template.get_status().get_state()
    if final_state == 'ok':
        self.log.debug("Template import completed successfully")
        return
    elif final_state == 'locked':
        raise Exception("Timed out waiting for template import to finish")
    else:
        raise Exception("Template import ended in unknown state (%s)" % (final_state))
def do_fence_host(api, name):
    """Manually fence host *name*; return 1 on success, 0 on request error."""
    try:
        fence_action = params.Action(fence_type='manual')
        api.hosts.get(name).fence(action=fence_action)
    except RequestError as err:
        print(err.reason)
        return 0
    return 1
def vm_migrate(api):
    """Migrate VM1 to the second host and wait for it to be 'up' again."""
    destination = params.Host(name=HOSTS[1], )
    api.vms.get(VM1_NAME).migrate(params.Action(host=destination, ))
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def deploy_template(self, template, *args, **kwargs):
    """
    Deploy a VM from a RHEV template and (optionally) power it on.

    :param template: name of the template to deploy from
    :param kwargs: must contain 'vm_name' and 'cluster'; optional
        'timeout' (seconds to wait for the VM to settle, default 900),
        'power_on' (default True), and 'placement_policy_host' +
        'placement_policy_affinity' to pin the VM to a host.
    :return: the name of the deployed VM
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, kwargs["vm_name"]))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': kwargs['vm_name'],
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template)
    }
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        host = params.Host(name=kwargs['placement_policy_host'])
        policy = params.VmPlacementPolicy(host=host,
                                          affinity=kwargs['placement_policy_affinity'])
        vm_kwargs['placement_policy'] = policy
    self.api.vms.add(params.VM(**vm_kwargs))
    self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
    if power_on:
        version = self.api.get_product_info().get_full_version()
        cfme_template = any(
            template.startswith(pfx) for pfx in ["cfme-55", "s_tpl", "sprout_template"])
        if cfme_template and version.startswith("3.4"):
            # CFME templates on RHEV 3.4 need a cloud-init root password
            action = params.Action(vm=params.VM(initialization=params.Initialization(
                cloud_init=params.CloudInit(users=params.Users(
                    user=[params.User(user_name="root", password="******")])))))
            # Pass the initialization directly instead of building a
            # throwaway one-key kwargs dict as before.
            self.start_vm(vm_name=kwargs['vm_name'], initialization=action)
        else:
            self.start_vm(vm_name=kwargs['vm_name'])
    return kwargs['vm_name']
def he_vm_migrate(prefix):
    """Migrate the hosted-engine VM to the second host and wait until the
    engine reports it running there."""
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    target_name = ordered_hosts[1]
    api.vms.get(HE_VM_NAME).migrate(
        params.Action(host=params.Host(name=target_name)),
    )
    target_host = api.hosts.get(name=target_name)
    testlib.assert_true_within_long(
        lambda: api.vms.get(HE_VM_NAME).host.id == target_host.id,
    )
def vm_migrate(prefix):
    """Migrate VM0 to the third host and wait for it to come back 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    migration = params.Action(host=params.Host(name=ordered_hosts[2]))
    api.vms.get(VM0_NAME).migrate(migration)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def _detach_vlan_from_host():
    """Remove the VLAN's network attachment from the host (uses the
    enclosing scope's `host` and `network_id`)."""
    attachment = _get_networkattachment_by_network_id(host, network_id)
    stale_attachment = params.NetworkAttachment(id=attachment.id)
    removal_action = params.Action(
        removed_network_attachments=params.NetworkAttachments(
            network_attachment=[stale_attachment]))
    host.setupnetworks(removal_action)
def detach_disk(self, instance, volume):
    """detach a volume."""
    try:
        target_vm = self._session.vms.get(name=instance)
        target_disk = target_vm.disks.get(name=volume)
        target_disk.delete(action=params.Action(detach=True))
    except Exception as e:
        # best-effort: failures are only logged, never raised
        LOG.debug(_("disk detach error %s" % str(e)))
def vm_run(api):
    """Start VM1 pinned to the first host and wait for it to be 'up'."""
    pinned = params.VmPlacementPolicy(host=params.Host(name=HOSTS[0], ), )
    api.vms.get(VM1_NAME).start(
        params.Action(vm=params.VM(placement_policy=pinned, ), ))
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def vm_run(prefix):
    """Start VM0 pinned to the second host and wait for 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    placement = params.VmPlacementPolicy(
        host=params.Host(name=ordered_hosts[1]),
    )
    api.vms.get(VM0_NAME).start(
        params.Action(vm=params.VM(placement_policy=placement)))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def import_templates(api):
    """Register every unregistered template found in the 'templates' export
    domain into CLUSTER_NAME, then wait for each known template to be 'ok'."""
    export_domain = api.storagedomains.get('templates', )
    unregistered = export_domain.templates.list(unregistered=True, )
    for tpl in unregistered:
        register_action = params.Action(
            cluster=params.Cluster(name=CLUSTER_NAME, ), )
        tpl.register(action=register_action)
    for template in api.templates.list():
        testlib.assert_true_within_short(
            lambda: api.templates.get(template.name).status.state == 'ok',
        )
def modify_ip_config(api, host, network_name, ip_configuration):
    """Replace the IP address assignments on the host's attachment of
    *network_name* and push the change via setupnetworks."""
    net_id = api.networks.get(name=network_name).id
    attachment = _get_attachment_by_id(host, net_id)
    attachment.set_ip_address_assignments(ip_configuration)
    update_action = params.Action(
        modified_network_attachments=params.NetworkAttachments(
            network_attachment=[attachment]),
        check_connectivity=True)
    return host.setupnetworks(update_action)
def _modify_ip_config(api, host, ip_configuration):
    """Replace the IP assignments on the host's VLAN100_NET attachment and
    assert that setupnetworks succeeds."""
    vlan_net_id = api.networks.get(name=VLAN100_NET).id
    attachment = _get_networkattachment_by_network_id(host, vlan_net_id)
    attachment.set_ip_address_assignments(ip_configuration)
    update_action = params.Action(
        modified_network_attachments=params.NetworkAttachments(
            network_attachment=[attachment]),
        check_connectivity=True)
    nt.assert_true(host.setupnetworks(update_action))
def template_export(api):
    """Export the CirrOS template to the templates export domain; skip the
    test when the template does not exist."""
    cirros = api.templates.get(TEMPLATE_CIRROS)
    if cirros is None:
        raise SkipTest('{0}: template {1} is missing'.format(
            template_export.__name__, TEMPLATE_CIRROS))
    export_sd = api.storagedomains.get(SD_TEMPLATES_NAME)
    cirros.export(params.Action(storage_domain=export_sd))
    testlib.assert_true_within_long(
        lambda: api.templates.get(TEMPLATE_CIRROS).status.state == 'ok',
    )
def import_templates(api):
    """Register unregistered templates from the export domain — currently
    always skipped (exported-domain generation is not implemented)."""
    #TODO: Fix the exported domain generation
    raise SkipTest('Exported domain generation not supported yet')
    # Unreachable until the skip above is removed.
    export_domain = api.storagedomains.get(SD_TEMPLATES_NAME, )
    for tpl in export_domain.templates.list(unregistered=True, ):
        tpl.register(action=params.Action(
            cluster=params.Cluster(name=CLUSTER_NAME, ), ), )
    for template in api.templates.list():
        testlib.assert_true_within_short(
            lambda: api.templates.get(template.name).status.state == 'ok',
        )
def detach_network_from_host(api, host, network_name, bond_name=None):
    """Remove *network_name*'s attachment (and optionally the named bond)
    from the host via setupnetworks."""
    network_id = api.networks.get(name=network_name).id
    attachment = _get_attachment_by_id(host, network_id)
    # there is no more than one bond
    if bond_name:
        bonds = [nic for nic in host.nics.list() if nic.name == bond_name]
    else:
        bonds = []
    removal_action = params.Action(
        removed_bonds=params.HostNics(host_nic=bonds),
        removed_network_attachments=params.NetworkAttachments(
            network_attachment=[params.NetworkAttachment(
                id=attachment.id)]))
    return host.setupnetworks(removal_action)
def _detach(vm, disk):
    """
    Detach specified disk from VM

    Parameters:
        vm - ovirtsdk.infrastructure.brokers.VM object to detach disk from
        disk - ovirtsdk.infrastructure.brokers.VMDisk object to detach

    Returns:
        1 - Change (always reported, even in check mode where the actual
            detach API call is skipped)
    """
    # In Ansible-style check mode, report the change without performing it
    if not MODULE_CHECK_MODE:
        disk.delete(action=params.Action(detach=True))
    return 1
def vm_run(prefix):
    """
    Start VM0 on the alphabetically-first host with a full cloud-init
    payload (hostname, root password, static eth0 configuration) and wait
    until the engine reports the VM 'up'.

    :param prefix: lago prefix giving access to the engine and hosts
    """
    api = prefix.virt_env.engine_vm().get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(
                    name=sorted(host_names)[0]
                ),
            ),
            initialization=params.Initialization(
                domain=params.Domain(
                    name='lago.example.com'
                ),
                cloud_init=params.CloudInit(
                    host=params.Host(
                        address='VM0'
                    ),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******'
                        )]
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(
                            nic=[params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot='True',
                                network=params.Network(
                                    ip=params.IP(
                                        # was '192.168.1.2.' — the trailing
                                        # dot made this an invalid IPv4
                                        # address
                                        address='192.168.1.2',
                                        netmask='255.255.255.0',
                                        gateway='192.168.1.1',
                                    ),
                                ),
                            )]
                        ),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def attach_network_to_host(api, host, nic_name, network_name, ip_configuration, bonds=None):
    """
    Attach *network_name* to the host NIC *nic_name* with the given IP
    assignments, optionally modifying bonds, via setupnetworks.

    :param api: open oVirt API connection (unused here; kept for interface
        compatibility with callers)
    :param host: host broker object to configure
    :param nic_name: name of the host NIC to attach the network to
    :param network_name: logical network to attach
    :param ip_configuration: IP address assignments for the attachment
    :param bonds: optional list of bond HostNICs to modify (default: none).
        The previous `bonds=[]` mutable default has been replaced with the
        None sentinel idiom; behavior is unchanged.
    :return: the setupnetworks result
    """
    if bonds is None:
        bonds = []
    network_attachment = params.NetworkAttachment(
        network=params.Network(name=network_name),
        host_nic=params.HostNIC(name=nic_name),
        ip_address_assignments=ip_configuration)
    attachment_action = params.Action(
        modified_bonds=params.HostNics(host_nic=bonds),
        modified_network_attachments=params.NetworkAttachments(
            network_attachment=[network_attachment]),
        check_connectivity=True)
    return host.setupnetworks(attachment_action)
def backup_to_export(api, config, vm_from_list):
    """
    Export the cloned snapshot VM to an export domain

    :param api: ovirtsdk api
    :param config: Configuration
    :param vm_from_list: Name of VM to backup
    """
    # The clone name is derived from the source VM name plus configured
    # middle/suffix parts
    vm_clone_name = vm_from_list + config.get_vm_middle() + config.get_vm_suffix()
    vm_clone = api.vms.get(vm_clone_name)
    logger.info("Export of VM (%s) started ..." % vm_clone_name)
    # In dry-run mode only log; skip the actual export and the wait
    if not config.get_dry_run():
        vm_clone.export(params.Action(storage_domain=api.storagedomains.get(config.get_export_domain())))
        VMTools.wait_for_vm_operation(api, config, "Exporting", vm_from_list)
    logger.info("Exporting finished")
def deploy_template(self, template, *args, **kwargs):
    """Deploy via the parent class, then power the VM on — injecting a
    cloud-init root password for CFME templates on RHEV 3.4."""
    power_on = kwargs.get('power_on', True)
    vm_name = super(RHEVMSystem, self).deploy_template(template, *args, **kwargs)
    if not power_on:
        return vm_name
    version = self.api.get_product_info().get_full_version()
    known_prefixes = ["cfme-55", "s_tpl", "sprout_template"]
    is_cfme = any(template.startswith(prefix) for prefix in known_prefixes)
    if is_cfme and version.startswith("3.4"):
        root_user = params.User(user_name="root", password="******")
        cloud_init = params.CloudInit(users=params.Users(user=[root_user]))
        action = params.Action(vm=params.VM(
            initialization=params.Initialization(cloud_init=cloud_init)))
        self.start_vm(vm_name=vm_name, initialization=action)
    else:
        self.start_vm(vm_name=vm_name)
    return vm_name
def run_vms(prefix):
    """
    Start VM0, the backup VM and VM2 with cloud-init, then wait for VM0 and
    the backup VM to report 'up'.

    :param prefix: lago prefix giving access to the engine and host VMs
    """
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    # Derive a static guest IP and gateway on the engine's /24 subnet
    vm_ip = '.'.join(engine.ip().split('.')[0:3] + ['199'])
    vm_gw = '.'.join(engine.ip().split('.')[0:3] + ['1'])
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted(host_names)[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                    users=params.Users(active=True, user=[
                        params.User(user_name='root', password='******')
                    ]),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(nic=[
                            params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot=True,
                                network=params.Network(ip=params.IP(
                                    address=vm_ip,
                                    netmask='255.255.255.0',
                                    gateway=vm_gw,
                                ), ),
                            )
                        ]),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    # NOTE(review): the backup VM is started with the same cloud-init
    # payload as VM0 (hostname 'VM0', same static IP) — confirm intended.
    api.vms.get(BACKUP_VM_NAME).start(start_params)
    # The shared Action object is mutated in place to give VM2 its own
    # hostname; this must happen only AFTER the two start() calls above.
    start_params.vm.initialization.cloud_init = params.CloudInit(
        host=params.Host(address='VM2'), )
    api.vms.get(VM2_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up' and api.vms.get(
            BACKUP_VM_NAME).status.state == 'up',
    )
def _attach_vlan_to_host(api, host, ip_configuration):
    """Attach VLAN100_NET to the host's management NIC with the supplied IP
    assignments via setupnetworks."""
    mgmt_attachment = _get_mgmt_attachment(api, host)
    mgmt_nic = host.nics.get(id=mgmt_attachment.get_host_nic().id)
    vlan_attachment = params.NetworkAttachment(
        network=params.Network(name=VLAN100_NET),
        host_nic=params.HostNIC(name=mgmt_nic.name),
        ip_address_assignments=ip_configuration)
    setup_action = params.Action(
        modified_network_attachments=params.NetworkAttachments(
            network_attachment=[vlan_attachment]),
        check_connectivity=True)
    host.setupnetworks(setup_action)