def build_entity(self):
    """
    Build the otypes.StorageDomain payload from the module parameters.

    Resolves the storage type and backend configuration via the private
    helpers, performs the storage login (e.g. iSCSI) before constructing
    the entity, and maps the Ansible parameters onto the SDK type.
    Fields tied to the 'imported' state (import_, id) are populated only
    for that state.
    """
    storage_type = self._get_storage_type()
    storage = self._get_storage()
    # Log in to the storage server before the domain entity is built.
    self._login(storage_type, storage)
    return otypes.StorageDomain(
        name=self.param('name'),
        description=self.param('description'),
        comment=self.param('comment'),
        wipe_after_delete=self.param('wipe_after_delete'),
        backup=self.param('backup'),
        critical_space_action_blocker=self.param(
            'critical_space_action_blocker'),
        warning_low_space_indicator=self.param('warning_low_space'),
        # import_/id are only meaningful when importing an existing domain.
        import_=True if self.param('state') == 'imported' else None,
        id=self.param('id')
        if self.param('state') == 'imported' else None,
        # managed_block_storage is its own domain type; otherwise the
        # user-chosen domain function (data/iso/export) is used.
        type=otypes.StorageDomainType(
            storage_type if storage_type == 'managed_block_storage'
            else self.param('domain_function')),
        host=otypes.Host(name=self.param('host')),
        discard_after_delete=self.param('discard_after_delete'),
        storage=otypes.HostStorage(
            # Driver (sensitive) options are name/value pairs passed
            # through to the storage driver, e.g. for cinderlib domains.
            driver_options=[
                otypes.Property(name=do.get('name'), value=do.get('value'))
                for do in storage.get('driver_options')
            ] if storage.get('driver_options') else None,
            driver_sensitive_options=[
                otypes.Property(name=dso.get('name'), value=dso.get('value'))
                for dso in storage.get('driver_sensitive_options')
            ] if storage.get('driver_sensitive_options') else None,
            type=otypes.StorageType(storage_type),
            # Block storage: one LogicalUnit per (lun_id, target) pair as
            # produced by the private __target_lun_map helper.
            logical_units=[
                otypes.LogicalUnit(
                    id=lun_id,
                    address=storage.get('address'),
                    port=int(storage.get('port', 3260)),  # 3260 = iSCSI default
                    target=target,
                    username=storage.get('username'),
                    password=storage.get('password'),
                ) for lun_id, target in self.__target_lun_map(storage)
            ] if storage_type in ['iscsi', 'fcp'] else None,
            override_luns=storage.get('override_luns'),
            mount_options=storage.get('mount_options'),
            # Gluster domains always use the glusterfs vfs type.
            vfs_type=(
                'glusterfs'
                if storage_type in ['glusterfs'] else storage.get('vfs_type')
            ),
            address=storage.get('address'),
            path=storage.get('path'),
            nfs_retrans=storage.get('retrans'),
            nfs_timeo=storage.get('timeout'),
            nfs_version=otypes.NfsVersion(storage.get('version'))
            if storage.get('version') else None,
        ) if storage_type is not None else None)
def create(self, cluster, vm):
    """
    Register *vm* as a hypervisor host inside *cluster*.

    :param cluster: clusterlib.Cluster
    :param vm: LagoVm
    """
    host_name = vm.name()
    host_spec = types.Host(
        name=host_name,
        description='host %s' % host_name,
        address=host_name,
        root_password=str(vm.root_password()),
        override_iptables=True,
        cluster=cluster.get_sdk_type(),
    )
    self._create_sdk_entity(host_spec)
def build_entity(self):
    """
    Build an otypes.Disk from the Ansible module parameters.

    When the 'logical_unit' parameter is supplied, the disk is a
    direct-LUN disk (lun_storage populated); otherwise an image disk
    placed on 'storage_domain' is described.
    """
    hosts_service = self._connection.system_service().hosts_service()
    logical_unit = self._module.params.get('logical_unit')
    disk = otypes.Disk(
        id=self._module.params.get('id'),
        name=self._module.params.get('name'),
        description=self._module.params.get('description'),
        format=otypes.DiskFormat(self._module.params.get('format'))
        if self._module.params.get('format') else None,
        content_type=otypes.DiskContentType(
            self._module.params.get('content_type'))
        if self._module.params.get('content_type') else None,
        # Default sparseness follows the format: thin for 'cow',
        # preallocated for 'raw', unless 'sparse' is set explicitly.
        sparse=self._module.params.get('sparse')
        if self._module.params.get('sparse') is not None
        else self._module.params.get('format') != 'raw',
        openstack_volume_type=otypes.OpenStackVolumeType(
            name=self.param('openstack_volume_type'))
        if self.param('openstack_volume_type') else None,
        provisioned_size=convert_to_bytes(self._module.params.get('size')),
        storage_domains=[
            otypes.StorageDomain(
                name=self._module.params.get('storage_domain'),
            ),
        ],
        quota=otypes.Quota(id=self._module.params.get('quota_id'))
        if self.param('quota_id') else None,
        shareable=self._module.params.get('shareable'),
        wipe_after_delete=self.param('wipe_after_delete'),
        lun_storage=otypes.HostStorage(
            # Host through which the LUN is accessed, resolved by name.
            host=otypes.Host(id=get_id_by_name(
                hosts_service, self._module.params.get('host')))
            if self.param('host') else None,
            type=otypes.StorageType(
                logical_unit.get('storage_type', 'iscsi')),
            logical_units=[
                otypes.LogicalUnit(
                    address=logical_unit.get('address'),
                    port=logical_unit.get('port', 3260),  # iSCSI default port
                    target=logical_unit.get('target'),
                    id=logical_unit.get('id'),
                    username=logical_unit.get('username'),
                    password=logical_unit.get('password'),
                )
            ],
        ) if logical_unit else None,
    )
    # When uploading an image, initial_size is simply set to the
    # requested size (only if the installed SDK exposes the attribute).
    if hasattr(
            disk,
            'initial_size') and self._module.params['upload_image_path']:
        disk.initial_size = convert_to_bytes(
            self._module.params.get('size'))
    return disk
def add_host(pname, pdescription, paddress, proot_password, pcluster, wait_for_up):
    """
    Add a host named *pname* to cluster *pcluster* and, when
    *wait_for_up* is truthy, block until it reports the UP status.
    """
    print('Adding Host : ' + pname + '...')
    new_host = types.Host(
        name=pname,
        description=pdescription,
        address=paddress,
        root_password=proot_password,
        cluster=types.Cluster(name=pcluster),
    )
    host = hosts_service.add(new_host)
    if not wait_for_up:
        return
    # Poll the per-host service until the host comes up.
    track_status(hosts_service.host_service(host.id), types.HostStatus.UP, 1)
def build_entity(self):
    """
    Build the otypes.StorageDomain payload from the module parameters.

    Performs the storage login first, then maps the Ansible parameters
    onto the SDK type.  Block-storage specifics (logical units,
    discard_after_delete) apply only to iscsi/fcp domains.
    """
    storage_type = self._get_storage_type()
    storage = self._get_storage()
    # Log in to the storage server before the domain entity is built.
    self._login(storage_type, storage)
    return otypes.StorageDomain(
        name=self._module.params['name'],
        description=self._module.params['description'],
        comment=self._module.params['comment'],
        wipe_after_delete=self._module.params['wipe_after_delete'],
        backup=self._module.params['backup'],
        critical_space_action_blocker=self._module.
        params['critical_space_action_blocker'],
        warning_low_space_indicator=self._module.
        params['warning_low_space'],
        # import_/id are only meaningful when importing an existing domain.
        import_=(True
                 if self._module.params['state'] == 'imported' else None),
        id=(self._module.params['id']
            if self._module.params['state'] == 'imported' else None),
        type=otypes.StorageDomainType(
            self._module.params['domain_function']),
        host=otypes.Host(name=self._module.params['host'], ),
        # Discard-after-delete is a block-storage feature only.
        discard_after_delete=self._module.params['discard_after_delete']
        if storage_type in ['iscsi', 'fcp'] else False,
        storage=otypes.HostStorage(
            type=otypes.StorageType(storage_type),
            # 'lun_id' may be a single id or a list of ids; one
            # LogicalUnit is built per id.
            logical_units=[
                otypes.LogicalUnit(
                    id=lun_id,
                    address=storage.get('address'),
                    port=int(storage.get('port', 3260)),  # iSCSI default port
                    target=storage.get('target'),
                    username=storage.get('username'),
                    password=storage.get('password'),
                ) for lun_id in (storage.get('lun_id') if isinstance(
                    storage.get('lun_id'), list
                ) else [storage.get('lun_id')])
            ] if storage_type in ['iscsi', 'fcp'] else None,
            override_luns=storage.get('override_luns'),
            mount_options=storage.get('mount_options'),
            # Gluster domains always use the glusterfs vfs type.
            vfs_type=(
                'glusterfs'
                if storage_type in ['glusterfs'] else storage.get('vfs_type')
            ),
            address=storage.get('address'),
            path=storage.get('path'),
            nfs_retrans=storage.get('retrans'),
            nfs_timeo=storage.get('timeout'),
            nfs_version=otypes.NfsVersion(storage.get('version'))
            if storage.get('version') else None,
        ) if storage_type is not None else None)
def create(self, cluster, name, address, root_password):
    """
    Add a new host to *cluster* and remember its root password.

    :param cluster: clusterlib.Cluster
    :param name: str
    :param address: str
    :param root_password: str
    """
    self._create_sdk_entity(
        types.Host(
            name=name,
            description='host %s' % name,
            address=address,
            root_password=root_password,
            override_iptables=True,
            cluster=cluster.get_sdk_type(),
        )
    )
    # Kept so later operations (e.g. ssh access) can reuse the password.
    self._root_password = root_password
def _migrate_vm(self, entity):
    """
    Migrate the VM to the host named by the 'host' module parameter.

    Only a VM that is already UP and running on a different host is
    migrated; in check mode only ``self.changed`` is flipped.  Returns
    *entity* unchanged in every case.
    """
    target_host = self._module.params['host']
    vm_service = self._service.vm_service(entity.id)
    if target_host is None:
        return entity
    # In case VM is preparing to be UP, wait to be up, to migrate it:
    if entity.status != otypes.VmStatus.UP:
        return entity
    hosts_service = self._connection.system_service().hosts_service()
    running_on = hosts_service.host_service(entity.host.id).get().name
    if target_host == running_on:
        return entity
    if not self._module.check_mode:
        vm_service.migrate(host=otypes.Host(name=target_host))
        self._wait_for_UP(vm_service)
    self.changed = True
    return entity
def deploy(self, vm_name, **kwargs):
    """
    Deploy a VM using this template

    Args:
        vm_name -- name of VM to create
        cluster -- cluster to which VM should be deployed
        timeout (optional) -- default 900
        power_on (optional) -- default True
        placement_policy_host (optional)
        placement_policy_affinity (optional)
        cpu (optional) -- number of cpu cores
        sockets (optional) -- number of cpu sockets, default 1
        ram (optional) -- memory; passed straight through to the SDK
            Vm.memory field, which takes bytes (the original docstring
            said GB — confirm with callers)
    Returns:
        wrapanapi.systems.rhevm.RHEVMVirtualMachine
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s', self.name, vm_name)
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': vm_name,
        'cluster': self.system.get_cluster(kwargs['cluster']),
        'template': self.raw,
    }
    # Placement policy is only applied when both pieces are given.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        host = types.Host(name=kwargs['placement_policy_host'])
        policy = types.VmPlacementPolicy(
            hosts=[host],
            affinity=kwargs['placement_policy_affinity'])
        vm_kwargs['placement_policy'] = policy
    if 'cpu' in kwargs:
        # BUGFIX: 'sockets' is documented as optional, but the original
        # kwargs.pop('sockets') raised KeyError when it was omitted.
        vm_kwargs['cpu'] = types.Cpu(
            topology=types.CpuTopology(
                cores=kwargs['cpu'],
                sockets=kwargs.pop('sockets', 1)
            )
        )
    if 'ram' in kwargs:
        vm_kwargs['memory'] = int(kwargs['ram'])  # in bytes
    vms_service = self.system.api.system_service().vms_service()
    vms_service.add(types.Vm(**vm_kwargs))
    vm = self.system.get_vm(vm_name)
    # The new VM first lands in STOPPED; optionally power it on.
    vm.wait_for_state(VmState.STOPPED, timeout=timeout)
    if power_on:
        vm.start()
    return vm
def rename(self, new_h, old_p, new_p):
    """
    Rename the host object found at *old_p* to the name carried by
    *new_p*.  Returns 0 on success, -ENOENT when the source does not
    exist, and -EINVAL when the destination already exists.
    """
    assert self == new_h
    source = self._get_object(old_p)
    if not source:
        return -errno.ENOENT
    # The destination must not exist yet; _get_object raising
    # RuntimeError is the "not found" signal we rely on here.
    try:
        self._get_object(new_p)
    except RuntimeError:
        pass
    else:
        return -errno.EINVAL
    patch = types.Host()
    patch.name = new_p["name"]
    self._service.host_service(source.id).update(patch)
    return 0
def find_host(connection):
    """Return the current host object or None.

    Identifies this machine by its VDSM hardware id, then looks for an
    Up host with that hw_id inside the datacenter that the output
    storage domain is attached to.  Returns None (caller falls back to
    "any host") whenever any step fails.
    """
    try:
        with builtins.open("/etc/vdsm/vdsm.id") as f:
            vdsm_id = f.readline().strip()
    except Exception as e:
        # This is most likely not an oVirt host.
        debug("cannot read /etc/vdsm/vdsm.id, using any host: %s" % e)
        return None

    debug("hw_id = %r" % vdsm_id)

    system_service = connection.system_service()
    storage_name = params['output_storage']
    data_centers = system_service.data_centers_service().list(
        search='storage.name=%s' % storage_name,
        case_sensitive=True,
    )
    if len(data_centers) == 0:
        # The storage domain is not attached to a datacenter
        # (shouldn't happen, would fail on disk creation).
        # BUGFIX: corrected "storange" typo in the debug message.
        debug("storage domain (%s) is not attached to a DC" % storage_name)
        return None

    datacenter = data_centers[0]
    debug("datacenter = %s" % datacenter.name)

    hosts_service = system_service.hosts_service()
    hosts = hosts_service.list(
        search="hw_id=%s and datacenter=%s and status=Up" % (vdsm_id, datacenter.name),
        case_sensitive=True,
    )
    if len(hosts) == 0:
        # Couldn't find a host that's fulfilling the following criteria:
        # - 'hw_id' equals to 'vdsm_id'
        # - Its status is 'Up'
        # - Belongs to the storage domain's datacenter
        debug("cannot find a running host with hw_id=%r, "
              "that belongs to datacenter '%s', "
              "using any host" % (vdsm_id, datacenter.name))
        return None

    host = hosts[0]
    debug("host.id = %r" % host.id)
    return types.Host(id=host.id)
def build_entity(self):
    """
    Translate the Ansible module parameters into an otypes.Host payload.

    Optional sub-objects (cluster, ssh, kdump, spm) are attached only
    when the corresponding parameter was supplied.
    """
    params = self._module.params
    cluster = otypes.Cluster(name=params['cluster']) if params['cluster'] else None
    # Public-key deployment is requested via the ssh sub-object.
    ssh = otypes.Ssh(authentication_method='publickey') if params['public_key'] else None
    kdump = None
    if params['kdump_integration']:
        kdump = otypes.KdumpStatus(params['kdump_integration'])
    spm = otypes.Spm(priority=params['spm_priority']) if params['spm_priority'] else None
    return otypes.Host(
        name=params['name'],
        cluster=cluster,
        comment=params['comment'],
        address=params['address'],
        root_password=params['password'],
        ssh=ssh,
        kdump_status=kdump,
        spm=spm,
        override_iptables=params['override_iptables'],
    )
def add_storage(pname, pdescription, phost, paddress, ppath):
    """
    Create an NFS data storage domain via host *phost* and wait until
    it reaches the UNATTACHED status.
    """
    print('Adding Storage : ' + pname + '...')
    nfs_backend = types.HostStorage(
        type=types.StorageType.NFS,
        address=paddress,
        path=ppath,
    )
    domain = sds_service.add(
        types.StorageDomain(
            name=pname,
            description=pdescription,
            type=types.StorageDomainType.DATA,
            host=types.Host(name=phost),
            storage=nfs_backend,
        )
    )
    # A freshly created domain sits in UNATTACHED until added to a DC.
    track_status(
        sds_service.storage_domain_service(domain.id),
        types.StorageDomainStatus.UNATTACHED,
        1,
    )
def build_entity(self):
    """
    Build the otypes.StorageDomain payload from the module parameters.

    Performs the storage login first; the import_/id fields are only
    populated when importing an existing block-storage (iscsi/fcp)
    domain.
    """
    storage_type = self._get_storage_type()
    storage = self._get_storage()
    # Log in to the storage server before the domain entity is built.
    self._login(storage_type, storage)
    return otypes.StorageDomain(
        name=self._module.params['name'],
        description=self._module.params['description'],
        comment=self._module.params['comment'],
        # Importing an existing domain is only supported for block storage.
        import_=True if (
            self._module.params['state'] == 'imported' and storage_type in ['iscsi', 'fcp']
        ) else None,
        id=self._module.params['id'] if (
            self._module.params['state'] == 'imported' and storage_type in ['iscsi', 'fcp']
        ) else None,
        type=otypes.StorageDomainType(
            self._module.params['domain_function']
        ),
        host=otypes.Host(
            name=self._module.params['host'],
        ),
        storage=otypes.HostStorage(
            type=otypes.StorageType(storage_type),
            # 'lun_id' may be a single id or a list of ids; one
            # LogicalUnit is built per id.
            logical_units=[
                otypes.LogicalUnit(
                    id=lun_id,
                    address=storage.get('address'),
                    port=storage.get('port', 3260),  # iSCSI default port
                    target=storage.get('target'),
                    username=storage.get('username'),
                    password=storage.get('password'),
                ) for lun_id in (
                    storage.get('lun_id')
                    if isinstance(storage.get('lun_id'), list)
                    else [storage.get('lun_id')]
                )
            ] if storage_type in ['iscsi', 'fcp'] else None,
            override_luns=storage.get('override_luns'),
            mount_options=storage.get('mount_options'),
            # Gluster domains always use the glusterfs vfs type.
            vfs_type='glusterfs'
            if storage_type in ['glusterfs']
            else storage.get('vfs_type'),
            address=storage.get('address'),
            path=storage.get('path'),
            nfs_retrans=storage.get('retrans'),
            nfs_timeo=storage.get('timeout'),
            nfs_version=otypes.NfsVersion(
                storage.get('version')
            ) if storage.get('version') else None,
        ) if storage_type is not None else None
    )
def build_entity(self):
    """
    Build an otypes.AffinityGroup from the module parameters.

    Rule sub-objects and the explicit host list are supported only by
    engine >= 4.1, so they are attached after an engine version check.
    """
    vm_rule = self._module.params['vm_rule']
    vm_enforcing = self._module.params['vm_enforcing']
    group = otypes.AffinityGroup(
        name=self._module.params['name'],
        description=self._module.params['description'],
        positive=None if vm_rule is None else vm_rule == 'positive',
        enforcing=None if vm_enforcing is None else vm_enforcing,
    )
    # Those attributes are supported since engine 4.1 only.
    if not engine_supported(self._connection, '4.1'):
        return group

    host_rule = self.param('host_rule')
    host_enforcing = self.param('host_enforcing')
    if host_enforcing is not None or host_rule is not None:
        group.hosts_rule = otypes.AffinityRule(
            positive=None if host_rule is None else host_rule == 'positive',
            enforcing=host_enforcing,
        )
    else:
        group.hosts_rule = None

    p_vm_rule = self.param('vm_rule')
    p_vm_enforcing = self.param('vm_enforcing')
    if p_vm_enforcing is not None or p_vm_rule is not None:
        group.vms_rule = otypes.AffinityRule(
            positive=None if p_vm_rule is None else p_vm_rule == 'positive',
            enforcing=p_vm_enforcing,
            enabled=None if p_vm_rule is None
            else p_vm_rule in ['negative', 'positive'],
        )
    else:
        group.vms_rule = None

    if self._host_ids is not None:
        group.hosts = [otypes.Host(id=host_id) for host_id in self._host_ids]
    else:
        group.hosts = None
    return group
def import_template(module, connection):
    """
    Import an external (KVM) template into the engine.

    Returns False immediately if a template with the requested name
    already exists; otherwise starts the import and blocks until the
    import-finished event (code 1158) for the new template is seen.
    Returns True once the import completed.
    """
    templates_service = connection.system_service().templates_service()
    if search_by_name(templates_service, module.params['name']) is not None:
        return False

    # Remember the newest event id so we only match events that happen
    # after the import was started.
    events_service = connection.system_service().events_service()
    last_event = events_service.list(max=1)[0]

    external_template = module.params['kvm']
    imports_service = connection.system_service().external_template_imports_service()
    imported_template = imports_service.add(
        otypes.ExternalTemplateImport(
            template=otypes.Template(
                name=module.params['name']
            ),
            url=external_template.get('url'),
            cluster=otypes.Cluster(
                name=module.params['cluster'],
            ) if module.params['cluster'] else None,
            storage_domain=otypes.StorageDomain(
                name=external_template.get('storage_domain'),
            ) if external_template.get('storage_domain') else None,
            host=otypes.Host(
                name=external_template.get('host'),
            ) if external_template.get('host') else None,
            clone=external_template.get('clone', None),
        )
    )
    # Wait until event with code 1158 for our template:
    templates_service = connection.system_service().templates_service()
    wait(
        service=templates_service.template_service(imported_template.template.id),
        # Success: at least one 1158 event mentioning the template name
        # has been emitted since the import was started.
        condition=lambda tmp: len(events_service.list(
            from_=int(last_event.id),
            search='type=1158 and message=*%s*' % tmp.name,
        )
        ) > 0 if tmp is not None else False,
        # The template disappearing means the import failed.
        fail_condition=lambda tmp: tmp is None,
        timeout=module.params['timeout'],
        poll_interval=module.params['poll_interval'],
    )
    return True
def test_import_vm1(engine_api):
    """
    Start an external (KVM/OVA) VM import and assert that the engine
    emits the IMPORTEXPORT_STARTING_IMPORT_VM event (code 1165).
    """
    engine = engine_api.system_service()
    sd = engine.storage_domains_service().list(
        search='name={}'.format(SD_ISCSI_NAME))[0]
    cluster = engine.clusters_service().list(
        search='name={}'.format(TEST_CLUSTER))[0]
    imports_service = engine.external_vm_imports_service()
    host = test_utils.get_first_active_host_by_name(engine)
    correlation_id = "test_validate_ova_import_vm"
    with engine_utils.wait_for_event(engine, 1165):  # IMPORTEXPORT_STARTING_IMPORT_VM
        imports_service.add(
            types.ExternalVmImport(
                name=IMPORTED_VM_NAME,
                provider=types.ExternalVmProviderType.KVM,
                url=IMPORTED_OVA_NAME,
                cluster=types.Cluster(id=cluster.id),
                storage_domain=types.StorageDomain(id=sd.id),
                host=types.Host(id=host.id),
                sparse=True),
            # BUGFIX: 'async' is a reserved keyword since Python 3.7, so
            # 'async=True' is a SyntaxError; the oVirt SDK renamed the
            # parameter to 'async_'.
            async_=True,
            query={'correlation_id': correlation_id})
def find_host(connection):
    """Return the current host object or None."""
    # A missing vdsm.id means this machine is most likely not an oVirt
    # host at all.
    try:
        with builtins.open("/etc/vdsm/vdsm.id") as f:
            vdsm_id = f.readline().strip()
    except Exception as e:
        debug("cannot read /etc/vdsm/vdsm.id, using any host: %s" % e)
        return None

    system_service = connection.system_service()
    storage_name = params['output_storage']
    data_centers = system_service.data_centers_service().list(
        search='storage=%s' % storage_name,
        case_sensitive=False,
    )
    # The storage domain is not attached to a datacenter (shouldn't
    # happen, would fail on disk creation).
    if not data_centers:
        return None

    datacenter = data_centers[0]
    debug("hw_id = %r, datacenter = %s" % (vdsm_id, datacenter.name))

    matching = system_service.hosts_service().list(
        search="hw_id=%s and datacenter=%s and status=Up" % (vdsm_id, datacenter.name),
        case_sensitive=False,
    )
    if not matching:
        # This oVirt host is not registered with engine.
        debug("cannot find host with hw_id=%r, using any host" % vdsm_id)
        return None

    current = matching[0]
    debug("host.id = %r" % current.id)
    return types.Host(id=current.id)
def build_entity(self):
    """
    Build an otypes.AffinityGroup from the module parameters.

    Top-level positive/enforcing mirror the VM rule for backward
    compatibility; the vms_rule/hosts_rule sub-objects are attached only
    when any of their parameters were supplied.
    """
    return otypes.AffinityGroup(
        name=self._module.params['name'],
        description=self._module.params['description'],
        positive=(
            self._module.params['vm_rule'] == 'positive'
        ) if self._module.params['vm_rule'] is not None else None,
        enforcing=(
            self._module.params['vm_enforcing']
        ) if self._module.params['vm_enforcing'] is not None else None,
        vms=[
            otypes.Vm(id=vm_id) for vm_id in self._vm_ids
        ] if self._vm_ids is not None else None,
        hosts=[
            otypes.Host(id=host_id) for host_id in self._host_ids
        ] if self._host_ids is not None else None,
        vms_rule=otypes.AffinityRule(
            positive=(
                self._module.params['vm_rule'] == 'positive'
            ) if self._module.params['vm_rule'] is not None else None,
            enforcing=self._module.params['vm_enforcing'],
            # The rule is enabled whenever a VM rule was explicitly chosen.
            enabled=(
                self._module.params['vm_rule'] in ['negative', 'positive']
            ) if self._module.params['vm_rule'] is not None else None,
        ) if (
            self._module.params['vm_enforcing'] is not None
            or self._module.params['vm_rule'] is not None
        ) else None,
        hosts_rule=otypes.AffinityRule(
            positive=(
                self._module.params['host_rule'] == 'positive'
            ) if self._module.params['host_rule'] is not None else None,
            enforcing=self._module.params['host_enforcing'],
        ) if (
            self._module.params['host_enforcing'] is not None
            or self._module.params['host_rule'] is not None
        ) else None,
    )
def build_entity(self):
    """
    Translate the Ansible module parameters into an otypes.Host payload.

    Optional sub-objects (cluster, ssh, kdump, spm, display, os) are
    attached only when the corresponding parameter was supplied.
    """
    return otypes.Host(
        name=self._module.params['name'],
        cluster=otypes.Cluster(name=self._module.params['cluster'])
        if self._module.params['cluster'] else None,
        comment=self._module.params['comment'],
        address=self._module.params['address'],
        root_password=self._module.params['password'],
        # Public-key deployment is requested via the ssh sub-object.
        ssh=otypes.Ssh(authentication_method='publickey', )
        if self._module.params['public_key'] else None,
        kdump_status=otypes.KdumpStatus(
            self._module.params['kdump_integration'])
        if self._module.params['kdump_integration'] else None,
        spm=otypes.Spm(priority=self._module.params['spm_priority'], )
        if self._module.params['spm_priority'] else None,
        override_iptables=self._module.params['override_iptables'],
        # Optional override of the display (console) address.
        display=otypes.Display(
            address=self._module.params['override_display'], )
        if self._module.params['override_display'] else None,
        # Custom kernel parameters are joined into one cmdline string.
        os=otypes.OperatingSystem(custom_kernel_cmdline=' '.join(
            self._module.params['kernel_params']), )
        if self._module.params['kernel_params'] else None,
    )
def build_entity(self):
    """
    Translate the Ansible module parameters into an otypes.Host payload.

    Optional sub-objects (cluster, ssh, kdump, spm, display, os, power
    management) are attached only when the corresponding parameter was
    supplied.
    """
    return otypes.Host(
        name=self.param('name'),
        cluster=otypes.Cluster(
            name=self.param('cluster')) if self.param('cluster') else None,
        comment=self.param('comment'),
        address=self.param('address'),
        root_password=self.param('password'),
        # Public-key deployment is requested via the ssh sub-object.
        ssh=otypes.Ssh(authentication_method=otypes.SshAuthenticationMethod
                       .PUBLICKEY, ) if self.param('public_key') else None,
        kdump_status=otypes.KdumpStatus(self.param('kdump_integration'))
        if self.param('kdump_integration') else None,
        spm=otypes.Spm(priority=self.param('spm_priority'), )
        if self.param('spm_priority') else None,
        override_iptables=self.param('override_iptables'),
        # Optional override of the display (console) address.
        display=otypes.Display(address=self.param('override_display'), )
        if self.param('override_display') else None,
        # Custom kernel parameters are joined into one cmdline string.
        os=otypes.OperatingSystem(custom_kernel_cmdline=' '.join(
            self.param('kernel_params')), )
        if self.param('kernel_params') else None,
        # Tri-state: only set power management when explicitly requested.
        power_management=otypes.PowerManagement(
            enabled=self.param('power_management_enabled'), )
        if self.param('power_management_enabled') is not None else None,
    )
def ui_command_create(self, name, address, password, cluster, description=None):
    """
    Add a new host and poll its status until it reaches UP, becomes
    NON_OPERATIONAL, or a 15-minute timeout expires; report the outcome
    on the shell log.
    """
    host = self._hosts_service.add(
        types.Host(
            name=name,
            description=description if description is not None else '',
            address=address,
            root_password=password,
            cluster=types.Cluster(name=cluster),
        ),
    )
    # Wait till the host is up:
    host_service = self._hosts_service.host_service(host.id)
    start_time = time.time()
    timeout = 15 * 60  # host deployment can legitimately take many minutes
    elapsed = False
    while not elapsed:
        time.sleep(5)
        host = host_service.get()
        # BUGFIX: the SDK enum member is NON_OPERATIONAL; the original
        # 'types.HostStatus.NonOperational' attribute does not exist and
        # raised AttributeError during the poll loop.
        if host.status in (types.HostStatus.UP, types.HostStatus.NON_OPERATIONAL):
            break
        if (time.time() - start_time) > timeout:
            elapsed = True
            break
    if host.status == types.HostStatus.UP:
        self.shell.log.info("Host was successfully added.")
        self.refresh()
    else:
        # Timed out or landed in a non-UP state (the original's second
        # condition was always true here, so a plain else suffices).
        self.shell.log.info("Host was not added properly. Status: %s" % host.status)
def build_entity(self):
    """
    Map the Ansible module parameters onto an otypes.Event.

    Each related object (cluster, host, vm, ...) is wrapped into its SDK
    type only when the corresponding parameter was supplied.
    """
    p = self._module.params

    def _by_id(factory, key):
        # Build factory(id=value) only when the parameter is present.
        value = p[key]
        return factory(id=value) if value is not None else None

    return otypes.Event(
        description=p['description'],
        severity=otypes.LogSeverity(p['severity']),
        origin=p['origin'],
        custom_id=p['custom_id'],
        id=p['id'],
        cluster=_by_id(otypes.Cluster, 'cluster'),
        data_center=_by_id(otypes.DataCenter, 'data_center'),
        host=_by_id(otypes.Host, 'host'),
        storage_domain=_by_id(otypes.StorageDomain, 'storage_domain'),
        template=_by_id(otypes.Template, 'template'),
        user=_by_id(otypes.User, 'user'),
        vm=_by_id(otypes.Vm, 'vm'),
    )
def main():
    """
    Entry point of the ovirt disk Ansible module.

    Parses the module arguments, then dispatches on 'state':
    present/attached/detached create or update the disk (optionally
    uploading/downloading/sparsifying the image), 'exported' pushes the
    image to a glance provider, 'imported' pulls it from one, and
    'absent' removes it.  Finally attaches/detaches the disk to/from a
    VM when one was given, and refreshes LUN info for direct-LUN disks.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None, choices=['virtio', 'ide', 'virtio_scsi']),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list', elements='str'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        content_type=dict(
            default='data',
            choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
        ),
        backup=dict(default=None, type='str', choices=['incremental']),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        scsi_passthrough=dict(default=None, type='str', choices=['disabled', 'filtered', 'unfiltered']),
        uses_scsi_reservation=dict(default=None, type='bool'),
        pass_discard=dict(default=None, type='bool'),
        propagate_errors=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
        host=dict(default=None),
        wipe_after_delete=dict(type='bool', default=None),
        activate=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    lun = module.params.get('logical_unit')
    host = module.params['host']
    # Fail when 'host' is specified without a LUN id.  The LUN id is
    # needed to identify an existing disk already available in the
    # environment.
    if (host and lun is None) or (host and lun.get("id") is None):
        module.fail_json(
            msg="Can not use parameter host ({0!s}) without "
                "specifying the logical_unit id".format(host)
        )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        force_create = False
        vm_service = get_vm_service(connection, module)
        # Direct-LUN disks are found via their LUN id; image disks via
        # the regular searchable attributes.
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))
        else:
            disk = disks_module.search_entity(search_params=searchable_attributes(module))
            if vm_service and disk:
                # If the disk is not among the VM's attachments but was
                # still found, it belongs to a template with the same
                # name as the VM, so the VM disk must be force-created.
                force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]

        ret = None
        # First take care of creating the disk, if needed:
        if state in ('present', 'detached', 'attached'):
            # Always activate the disk when it is being created.
            if vm_service is not None and disk is None:
                module.params['activate'] = module.params['activate'] is None or module.params['activate']
            ret = disks_module.create(
                entity=disk if not force_create else None,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                search_params=searchable_attributes(module),
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
                force_create=force_create,
                # An upload needs the disk fully created before it starts.
                _wait=True if module.params['upload_image_path'] else module.params['wait'],
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass the ID to the module, so in case we want to
            # detach/attach the disk the attach/detach methods have it:
            module.params['id'] = ret['id']

            # Upload the disk image when the disk is new or 'force' is set:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                if module.params['format'] == 'cow' and module.params['content_type'] == 'iso':
                    module.warn("To upload an ISO image 'format' parameter needs to be set to 'raw'.")
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download the disk image when the target file does not exist
            # or 'force' is set:
            if (
                module.params['download_image_path'] and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if the disk is of image type:
            if not module.check_mode:
                disk = disks_service.disk_service(module.params['id']).get()
                if disk.storage_type == otypes.DiskStorageType.IMAGE:
                    ret = disks_module.action(
                        action='sparsify',
                        action_condition=lambda d: module.params['sparsify'],
                        wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    )

        # Export disk as image to glance domain
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                # NOTE(review): '%' binds tighter than 'or', so the
                # "module.params.get('id')" fallback is never reached —
                # likely missing parentheses around the 'or' expression.
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" % module.params.get('name') or module.params.get('id')
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'imported':
            # Locate the image on the glance provider and import it.
            glance_service = connection.system_service().openstack_image_providers_service()
            image_provider = search_by_name(glance_service, module.params['image_provider'])
            images_service = glance_service.service(image_provider.id).images_service()
            entity_id = get_id_by_name(images_service, module.params['name'])
            images_service.service(entity_id).import_(
                storage_domain=otypes.StorageDomain(
                    name=module.params['storage_domain']
                ) if module.params['storage_domain'] else None,
                disk=otypes.Disk(
                    name=module.params['name']
                ),
                import_as_template=False,
            )
            # Wait for the disk to appear in the system:
            disk = disks_module.wait_for_import(
                condition=lambda t: t.status == otypes.DiskStatus.OK
            )
            ret = disks_module.create(result_state=otypes.DiskStatus.OK)
        elif state == 'absent':
            ret = disks_module.remove()

        # If a VM was passed, attach/detach the disk to/from the VM:
        if vm_service:
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        # When the host parameter is specified and the disk is not being
        # removed, refresh the information about the LUN.
        if state != 'absent' and host:
            hosts_service = connection.system_service().hosts_service()
            host_id = get_id_by_name(hosts_service, host)
            disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only for username/password sessions, not tokens.
        connection.close(logout=auth.get('token') is None)
def build_entity(self):
    """Build and return the ``otypes.Disk`` entity described by the module parameters.

    If no explicit ``size`` is given but an ``upload_image_path`` is, the
    virtual size is probed from the image via ``qemu-img info``.  For qcow2
    uploads the required initial allocation is additionally measured with
    ``qemu-img measure`` (only when the SDK's Disk type has ``initial_size``).
    Most attributes are passed through only when the corresponding module
    parameter is set; otherwise they are left as ``None`` so the engine keeps
    its defaults.
    """
    hosts_service = self._connection.system_service().hosts_service()
    logical_unit = self._module.params.get('logical_unit')
    size = convert_to_bytes(self._module.params.get('size'))
    # No size given but uploading an image: take the virtual size from the image header.
    if not size and self._module.params.get('upload_image_path'):
        out = subprocess.check_output(
            ["qemu-img", "info", "--output", "json", self._module.params.get('upload_image_path')])
        image_info = json.loads(out)
        size = image_info["virtual-size"]
    disk = otypes.Disk(
        id=self._module.params.get('id'),
        name=self._module.params.get('name'),
        description=self._module.params.get('description'),
        format=otypes.DiskFormat(
            self._module.params.get('format')
        ) if self._module.params.get('format') else None,
        content_type=otypes.DiskContentType(
            self._module.params.get('content_type')
        ) if self._module.params.get('content_type') else None,
        # Default sparseness follows the format: everything but 'raw' is sparse.
        sparse=self._module.params.get(
            'sparse'
        ) if self._module.params.get(
            'sparse'
        ) is not None else self._module.params.get('format') != 'raw',
        openstack_volume_type=otypes.OpenStackVolumeType(
            name=self.param('openstack_volume_type')
        ) if self.param('openstack_volume_type') else None,
        provisioned_size=size,
        storage_domains=[
            otypes.StorageDomain(
                name=self._module.params.get('storage_domain'),
            ),
        ],
        quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') else None,
        shareable=self._module.params.get('shareable'),
        sgio=otypes.ScsiGenericIO(self.param('scsi_passthrough')) if self.param('scsi_passthrough') else None,
        propagate_errors=self.param('propagate_errors'),
        backup=otypes.DiskBackup(self.param('backup')) if self.param('backup') else None,
        wipe_after_delete=self.param('wipe_after_delete'),
        # Direct-LUN disks: attach the LUN description, optionally pinned to a host.
        lun_storage=otypes.HostStorage(
            host=otypes.Host(
                id=get_id_by_name(hosts_service, self._module.params.get('host'))
            ) if self.param('host') else None,
            type=otypes.StorageType(
                logical_unit.get('storage_type', 'iscsi')
            ),
            logical_units=[
                otypes.LogicalUnit(
                    address=logical_unit.get('address'),
                    port=logical_unit.get('port', 3260),
                    target=logical_unit.get('target'),
                    id=logical_unit.get('id'),
                    username=logical_unit.get('username'),
                    password=logical_unit.get('password'),
                )
            ],
        ) if logical_unit else None,
    )
    # Older SDKs may lack Disk.initial_size; only measure when the attribute exists.
    if hasattr(disk, 'initial_size') and self._module.params['upload_image_path']:
        out = subprocess.check_output([
            'qemu-img',
            'measure',
            '-O',
            'qcow2' if self._module.params.get('format') == 'cow' else 'raw',
            '--output',
            'json',
            self._module.params['upload_image_path']
        ])
        measure = json.loads(out)
        disk.initial_size = measure["required"]
    return disk
def migrate(self, dst_host_name):
    """Ask the engine to migrate this VM to the host named *dst_host_name*."""
    destination = types.Host(name=dst_host_name)
    self._service.migrate(host=destination)
def main():
    """Ansible entry point: drive an oVirt host into the requested ``state``.

    Supported states: present, absent, maintenance, upgraded, started,
    restarted, stopped, reinstalled, iscsidiscover, iscsilogin.  The two
    iSCSI states require the ``iscsi`` parameter (enforced via
    ``required_if``).  Results are returned through ``module.exit_json``;
    any exception is reported via ``module.fail_json`` and the SDK
    connection is always closed in the ``finally`` block.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=[
                'present', 'absent', 'maintenance', 'upgraded', 'started',
                'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
            ],
            default='present',
        ),
        name=dict(required=True),
        comment=dict(default=None),
        cluster=dict(default=None),
        address=dict(default=None),
        password=dict(default=None, no_log=True),
        public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
        kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
        spm_priority=dict(default=None, type='int'),
        override_iptables=dict(default=None, type='bool'),
        force=dict(default=False, type='bool'),
        timeout=dict(default=600, type='int'),
        override_display=dict(default=None),
        kernel_params=dict(default=None, type='list'),
        hosted_engine=dict(default=None, choices=['deploy', 'undeploy']),
        power_management_enabled=dict(default=None, type='bool'),
        activate=dict(default=True, type='bool'),
        iscsi=dict(default=None, type='dict'),
        check_upgrade=dict(default=True, type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_if=[['state', 'iscsidiscover', ['iscsi']],
                                        ['state', 'iscsilogin', ['iscsi']]])
    check_sdk(module)
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        hosts_module = HostsModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )
        state = module.params['state']
        # control_state() returns the existing host entity (or None if absent).
        host = control_state(hosts_module)
        if state == 'present':
            # Only wait for UP / apply the reinstall fail condition on first creation.
            ret = hosts_module.create(
                deploy_hosted_engine=(module.params.get('hosted_engine') == 'deploy') if module.params.get('hosted_engine') is not None else None,
                result_state=hoststate.UP if host is None else None,
                fail_condition=hosts_module.failed_state_after_reinstall if host is None else lambda h: False,
            )
            # Optionally activate a pre-existing host that is not UP.
            if module.params['activate'] and host is not None:
                ret = hosts_module.action(
                    action='activate',
                    action_condition=lambda h: h.status != hoststate.UP,
                    wait_condition=lambda h: h.status == hoststate.UP,
                    fail_condition=failed_state,
                )
        elif state == 'absent':
            ret = hosts_module.remove()
        elif state == 'maintenance':
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status != hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=failed_state,
            )
            ret = hosts_module.create()
        elif state == 'upgraded':
            # After upgrade the host should come back in the state it started in.
            # NOTE(review): assumes the host already exists; host is None would raise here.
            result_state = hoststate.MAINTENANCE if host.status == hoststate.MAINTENANCE else hoststate.UP
            events_service = connection.system_service().events_service()
            last_event = events_service.list(max=1)[0]
            if module.params['check_upgrade']:
                # Trigger an upgrade check and wait until either the flag flips or
                # a "check finished" event (type 885) newer than last_event appears.
                hosts_module.action(
                    action='upgrade_check',
                    action_condition=lambda host: not host.update_available,
                    wait_condition=lambda host: host.update_available or (len([
                        event
                        for event in events_service.list(
                            from_=int(last_event.id),
                            search='type=885 and host.name=%s' % host.name,
                        )
                    ]) > 0),
                    # NOTE(review): 'type=839 or type=887 and host.name=%s' — verify the
                    # engine search grammar gives the intended or/and precedence here.
                    fail_condition=lambda host: len([
                        event
                        for event in events_service.list(
                            from_=int(last_event.id),
                            search='type=839 or type=887 and host.name=%s' % host.name,
                        )
                    ]) > 0,
                )
                # Set to False, because upgrade_check isn't 'changing' action:
                hosts_module._changed = False
            ret = hosts_module.action(
                action='upgrade',
                action_condition=lambda h: h.update_available,
                wait_condition=lambda h: h.status == result_state,
                post_action=lambda h: time.sleep(module.params['poll_interval']),
                fail_condition=hosts_module.failed_state_after_reinstall,
            )
        elif state == 'iscsidiscover':
            # Discovery only reads targets; it is reported as changed=False.
            host_id = get_id_by_name(hosts_service, module.params['name'])
            iscsi_param = module.params['iscsi']
            iscsi_targets = hosts_service.service(host_id).iscsi_discover(
                iscsi=otypes.IscsiDetails(
                    port=int(iscsi_param.get('port', 3260)),
                    username=iscsi_param.get('username'),
                    password=iscsi_param.get('password'),
                    address=iscsi_param.get('address'),
                ),
            )
            ret = {
                'changed': False,
                'id': host_id,
                'iscsi_targets': iscsi_targets,
            }
        elif state == 'iscsilogin':
            host_id = get_id_by_name(hosts_service, module.params['name'])
            iscsi_param = module.params['iscsi']
            ret = hosts_module.action(
                action='iscsi_login',
                iscsi=otypes.IscsiDetails(
                    port=int(iscsi_param.get('port', 3260)),
                    username=iscsi_param.get('username'),
                    password=iscsi_param.get('password'),
                    address=iscsi_param.get('address'),
                    target=iscsi_param.get('target'),
                ),
            )
        elif state == 'started':
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status == hoststate.DOWN,
                wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
                fail_condition=failed_state,
                fence_type='start',
            )
        elif state == 'stopped':
            # Put the host into maintenance (or down) before fencing it off.
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status != hoststate.DOWN,
                wait_condition=lambda h: h.status == hoststate.DOWN if module.params['wait'] else True,
                fail_condition=failed_state,
                fence_type='stop',
            )
        elif state == 'restarted':
            ret = hosts_module.action(
                action='fence',
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
                fence_type='restart',
            )
        elif state == 'reinstalled':
            # Deactivate host if not in maintanence:
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )
            # Reinstall host:
            hosts_module.action(
                action='install',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                post_action=hosts_module.post_reinstall,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=hosts_module.failed_state_after_reinstall,
                host=otypes.Host(
                    override_iptables=module.params['override_iptables'],
                ) if module.params['override_iptables'] else None,
                root_password=module.params['password'],
                ssh=otypes.Ssh(authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
                               ) if module.params['public_key'] else None,
                deploy_hosted_engine=(module.params.get('hosted_engine') == 'deploy') if module.params.get('hosted_engine') is not None else None,
                undeploy_hosted_engine=(module.params.get('hosted_engine') == 'undeploy') if module.params.get('hosted_engine') is not None else None,
            )
            # Activate host after reinstall:
            ret = hosts_module.action(
                action='activate',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only for username/password sessions; token sessions stay valid.
        connection.close(logout=auth.get('token') is None)
def build_entity(self):
    """Return a Host entity whose power management is switched on."""
    pm = otypes.PowerManagement(enabled=True)
    return otypes.Host(power_management=pm)
def create_vm(kwargs, call=None):
    '''
    Create a VM described by a YAML file.

    ``kwargs`` must include ``filename`` — the path to the YAML description.

    - If C(cow) format is used, disk will by created as sparse, so space will
      be allocated for the volume as needed, also known as I(thin provision).
    - If C(raw) format is used, disk storage will be allocated right away,
      also known as I(preallocated).

    :raises SaltCloudSystemExit: when not invoked with -f/--function or when
        ``filename`` is missing.
    :return: dict reporting the created VM name.
    '''
    # Validate the invocation BEFORE doing any work, so a wrong call does not
    # leave a half-created VM behind (the original checked after creation and
    # reported the wrong action name in the message).
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_vm action must be called with -f or --function.')
    # Raise rather than assert: asserts are stripped under `python -O`.
    if "filename" not in kwargs:
        raise SaltCloudSystemExit(
            "Can't find filename parameter in function call")

    vm_info = parse_yaml(kwargs["filename"])
    common = vm_info["common"]

    # YAML device name -> SDK boot device.  Unknown names map to None, which
    # matches the original behavior of appending None for unrecognized values.
    device_map = {
        "hd": types.BootDevice.HD,
        "cdrom": types.BootDevice.CDROM,
        "network": types.BootDevice.NETWORK,
    }
    boot_devices = [
        device_map.get(common[key].lower())
        for key in ("boot_first_device", "boot_second_device")
        if key in common
    ]

    # NOTE(review): `connection` is a module-level name that is first called
    # and then used as an object — presumably it establishes a global SDK
    # connection; confirm against the rest of the driver.
    connection()
    vms_service = connection.system_service().vms_service()

    cpu_info = vm_info["CPU"]
    mem_info = vm_info["memory"]
    # Memory values in the YAML are given in GiB.
    mem_bytes = 1024 * 1024 * 1024 * int(mem_info["memory"])

    vms_service.add(
        types.Vm(
            name=vm_info["name"],
            os=types.OperatingSystem(
                type=vm_info.get("os_type", "Other"),
                boot=types.Boot(devices=boot_devices)),
            placement_policy=types.VmPlacementPolicy(
                hosts=[types.Host(name=common["host"])]),
            cpu=types.Cpu(topology=types.CpuTopology(
                cores=cpu_info.get("cores", 1),
                sockets=cpu_info.get("sockets", 1),
                threads=cpu_info.get("threads", 1),
            )),
            memory=mem_bytes,
            memory_policy=types.MemoryPolicy(
                # Defaults preserved from the original: guaranteed falls back to
                # 0.5 GiB per configured GiB, max to 2 GiB per configured GiB.
                guaranteed=1024 * 1024 * 1024 * mem_info["guaranteed"]
                if "guaranteed" in mem_info
                else 512 * 1024 * 1024 * int(mem_info["memory"]),
                ballooning=mem_info.get("ballooning", True),
                max=1024 * 1024 * 1024 * mem_info["maximum"]
                if "maximum" in mem_info
                else 2048 * 1024 * 1024 * int(mem_info["memory"]),
            ),
            cluster=types.Cluster(name=common["cluster"]),
            template=types.Template(name=common.get("template", "Blank")),
            description=common.get("description", "Not provided"),
            comment=common.get("comment", "Not provided"),
            soundcard_enabled=common.get("soundcard_enabled", False),
        ),
    )

    # Optional post-creation attachments.
    if "disks" in vm_info:
        for disk in vm_info["disks"]:
            attach_disk(disk, vm_info["name"])
    if "networks" in vm_info:
        for network in vm_info["networks"]:
            attach_network(network, vm_info["name"])

    connection.close()
    return {'Created': '{0} was created.'.format(vm_info["name"])}
def build_entity(self):
    """Build and return the ``otypes.InstanceType`` entity from module parameters.

    Each optional attribute is only constructed when its corresponding
    parameter(s) are set; otherwise ``None`` is passed so the engine keeps
    its defaults.  Grouped attributes (CPU, memory policy, HA, placement)
    are created when any of their member parameters is provided.
    """
    return otypes.InstanceType(
        id=self.param('id'),
        name=self.param('name'),
        console=(
            otypes.Console(enabled=self.param('serial_console'))
        ) if self.param('serial_console') is not None else None,
        usb=(
            otypes.Usb(enabled=self.param('usb_support'))
        ) if self.param('usb_support') is not None else None,
        high_availability=otypes.HighAvailability(
            enabled=self.param('high_availability'),
            priority=self.param('high_availability_priority'),
        ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
        # CPU block is built only when at least one CPU-related parameter is set.
        cpu=otypes.Cpu(
            topology=otypes.CpuTopology(
                cores=self.param('cpu_cores'),
                sockets=self.param('cpu_sockets'),
                threads=self.param('cpu_threads'),
            ) if any((
                self.param('cpu_cores'),
                self.param('cpu_sockets'),
                self.param('cpu_threads')
            )) else None,
            cpu_tune=otypes.CpuTune(
                vcpu_pins=[
                    otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu']))
                    for pin in self.param('cpu_pinning')
                ],
            ) if self.param('cpu_pinning') else None,
            mode=otypes.CpuMode(self.param('cpu_mode')) if self.param(
                'cpu_mode') else None,
        ) if any((
            self.param('cpu_cores'),
            self.param('cpu_sockets'),
            self.param('cpu_threads'),
            self.param('cpu_mode'),
            self.param('cpu_pinning')
        )) else None,
        os=otypes.OperatingSystem(
            type=self.param('operating_system'),
            boot=otypes.Boot(
                devices=[
                    otypes.BootDevice(dev)
                    for dev in self.param('boot_devices')
                ],
            ) if self.param('boot_devices') else None
        ),
        rng_device=otypes.RngDevice(
            source=otypes.RngSource(self.param('rng_device')),
            rate=otypes.Rate(
                bytes=self.param('rng_bytes'),
                period=self.param('rng_period')
            )
        ) if self.param('rng_device') else None,
        memory=convert_to_bytes(
            self.param('memory')
        ) if self.param('memory') else None,
        virtio_scsi=otypes.VirtioScsi(
            enabled=self.param('virtio_scsi')
        ) if self.param('virtio_scsi') else None,
        # ballooning_enabled is a tri-state boolean, hence the explicit
        # `is not None` check inside the any() guard.
        memory_policy=otypes.MemoryPolicy(
            guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
            ballooning=self.param('ballooning_enabled'),
            max=convert_to_bytes(self.param('memory_max')),
        ) if any((
            self.param('memory_guaranteed'),
            self.param('ballooning_enabled') is not None,
            self.param('memory_max')
        )) else None,
        description=self.param('description'),
        placement_policy=otypes.VmPlacementPolicy(
            affinity=otypes.VmAffinity(self.param('placement_policy')),
            hosts=[
                otypes.Host(name=self.param('host')),
            ] if self.param('host') else None,
        ) if self.param('placement_policy') else None,
        soundcard_enabled=self.param('soundcard_enabled'),
        display=otypes.Display(
            smartcard_enabled=self.param('smartcard_enabled')
        ) if self.param('smartcard_enabled') is not None else None,
        io=otypes.Io(
            threads=self.param('io_threads'),
        ) if self.param('io_threads') is not None else None,
    )
def main():
    """Ansible entry point: drive an oVirt host into the requested ``state``.

    Supported states: present, absent, maintenance, upgraded, started,
    restarted, stopped, reinstalled.  Results are returned through
    ``module.exit_json``; any exception is reported via ``module.fail_json``
    and the SDK connection is always closed in the ``finally`` block.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=[
                'present', 'absent', 'maintenance', 'upgraded', 'started',
                'restarted', 'stopped', 'reinstalled',
            ],
            default='present',
        ),
        name=dict(required=True),
        comment=dict(default=None),
        cluster=dict(default=None),
        address=dict(default=None),
        password=dict(default=None, no_log=True),
        public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
        kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
        spm_priority=dict(default=None, type='int'),
        override_iptables=dict(default=None, type='bool'),
        force=dict(default=False, type='bool'),
        timeout=dict(default=600, type='int'),
        override_display=dict(default=None),
        kernel_params=dict(default=None, type='list'),
        hosted_engine=dict(default=None, choices=['deploy', 'undeploy']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        hosts_module = HostsModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )
        state = module.params['state']
        control_state(hosts_module)
        if state == 'present':
            hosts_module.create(
                deploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'deploy'
                ) if module.params.get('hosted_engine') is not None else None,
            )
            # Activate the host if it sits in maintenance, then wait for UP.
            ret = hosts_module.action(
                action='activate',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
            )
        elif state == 'absent':
            ret = hosts_module.remove()
        elif state == 'maintenance':
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status != hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=failed_state,
            )
            ret = hosts_module.create()
        elif state == 'upgraded':
            ret = hosts_module.action(
                action='upgrade',
                action_condition=lambda h: h.update_available,
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
            )
        elif state == 'started':
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status == hoststate.DOWN,
                wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
                fail_condition=failed_state,
                fence_type='start',
            )
        elif state == 'stopped':
            # Deactivate first so the host is in maintenance/down before fencing.
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status != hoststate.DOWN,
                wait_condition=lambda h: h.status == hoststate.DOWN if module.params['wait'] else True,
                fail_condition=failed_state,
                fence_type='stop',
            )
        elif state == 'restarted':
            ret = hosts_module.action(
                action='fence',
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
                fence_type='restart',
            )
        elif state == 'reinstalled':
            # Deactivate host if not in maintanence:
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )
            # Reinstall host:
            hosts_module.action(
                action='install',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                post_action=hosts_module.post_reinstall,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=failed_state,
                host=otypes.Host(
                    override_iptables=module.params['override_iptables'],
                ) if module.params['override_iptables'] else None,
                root_password=module.params['password'],
                ssh=otypes.Ssh(
                    authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
                ) if module.params['public_key'] else None,
                deploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'deploy'
                ) if module.params.get('hosted_engine') is not None else None,
                undeploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'undeploy'
                ) if module.params.get('hosted_engine') is not None else None,
            )
            # Activate host after reinstall:
            ret = hosts_module.action(
                action='activate',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only for username/password sessions; token sessions stay valid.
        connection.close(logout=auth.get('token') is None)