def build_entity(self):
    return otypes.Host(
        id=self._module.params.get('id'),
        name=self.param('name'),
        cluster=otypes.Cluster(
            name=self.param('cluster')
        ) if self.param('cluster') else None,
        comment=self.param('comment'),
        address=self.param('address'),
        root_password=self.param('password'),
        ssh=otypes.Ssh(
            authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
        ) if self.param('public_key') else None,
        spm=otypes.Spm(
            priority=self.param('spm_priority'),
        ) if self.param('spm_priority') else None,
        override_iptables=self.param('override_iptables'),
        display=otypes.Display(
            address=self.param('override_display'),
        ) if self.param('override_display') else None,
        os=otypes.OperatingSystem(
            custom_kernel_cmdline=' '.join(self.param('kernel_params')),
        ) if self.param('kernel_params') else None,
        power_management=otypes.PowerManagement(
            enabled=self.param('power_management_enabled'),
            kdump_detection=self.param('kdump_integration') == 'enabled',
        ) if self.param('power_management_enabled') is not None or self.param('kdump_integration') else None,
        vgpu_placement=otypes.VgpuPlacement(
            self.param('vgpu_placement')
        ) if self.param('vgpu_placement') is not None else None,
    )
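# The build_entity() above reads most values through a param() helper inherited
# from the shared oVirt BaseModule. That helper is not part of this section; a
# minimal sketch of what it might look like, treated purely as an assumption:
def param(self, name, default=None):
    """Return the value of the given module parameter, or a default."""
    return self._module.params.get(name, default)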
def build_entity(self):
    storage_type = self._get_storage_type()
    storage = self._get_storage()
    self._login(storage_type, storage)

    return otypes.StorageDomain(
        name=self.param('name'),
        description=self.param('description'),
        comment=self.param('comment'),
        wipe_after_delete=self.param('wipe_after_delete'),
        backup=self.param('backup'),
        critical_space_action_blocker=self.param('critical_space_action_blocker'),
        warning_low_space_indicator=self.param('warning_low_space'),
        import_=True if self.param('state') == 'imported' else None,
        id=self.param('id') if self.param('state') == 'imported' else None,
        type=otypes.StorageDomainType(self.param('domain_function')),
        host=otypes.Host(name=self.param('host')),
        discard_after_delete=self.param('discard_after_delete'),
        storage=otypes.HostStorage(
            type=otypes.StorageType(storage_type),
            logical_units=[
                otypes.LogicalUnit(
                    id=lun_id,
                    address=storage.get('address'),
                    port=int(storage.get('port', 3260)),
                    target=target,
                    username=storage.get('username'),
                    password=storage.get('password'),
                ) for lun_id, target in self.__target_lun_map(storage)
            ] if storage_type in ['iscsi', 'fcp'] else None,
            override_luns=storage.get('override_luns'),
            mount_options=storage.get('mount_options'),
            vfs_type=(
                'glusterfs'
                if storage_type in ['glusterfs']
                else storage.get('vfs_type')
            ),
            address=storage.get('address'),
            path=storage.get('path'),
            nfs_retrans=storage.get('retrans'),
            nfs_timeo=storage.get('timeout'),
            nfs_version=otypes.NfsVersion(
                storage.get('version')
            ) if storage.get('version') else None,
        ) if storage_type is not None else None,
    )
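# The iSCSI/FCP branch above iterates over self.__target_lun_map(storage). That
# private helper is not shown in this section; a plausible sketch, assuming the
# storage dict carries a 'lun_id' (a single id or a list) and an optional
# 'target' key, might pair each LUN id with its target like this:
def __target_lun_map(self, storage):
    lun_ids = storage.get('lun_id')
    if not isinstance(lun_ids, list):
        lun_ids = [lun_ids]
    # Pair every LUN id with the (possibly absent) iSCSI target.
    return [(lun_id, storage.get('target')) for lun_id in lun_ids]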
def build_entity(self):
    affinity_group = otypes.AffinityGroup(
        name=self._module.params['name'],
        description=self._module.params['description'],
        positive=(
            self._module.params['vm_rule'] == 'positive'
        ) if self._module.params['vm_rule'] is not None else None,
        enforcing=(
            self._module.params['vm_enforcing']
        ) if self._module.params['vm_enforcing'] is not None else None,
    )

    # These attributes are supported since engine 4.1:
    if not engine_supported(self._connection, '4.1'):
        return affinity_group

    affinity_group.hosts_rule = otypes.AffinityRule(
        positive=(
            self.param('host_rule') == 'positive'
        ) if self.param('host_rule') is not None else None,
        enforcing=self.param('host_enforcing'),
    ) if (
        self.param('host_enforcing') is not None or
        self.param('host_rule') is not None
    ) else None

    affinity_group.vms_rule = otypes.AffinityRule(
        positive=(
            self.param('vm_rule') == 'positive'
        ) if self.param('vm_rule') is not None else None,
        enforcing=self.param('vm_enforcing'),
        enabled=(
            self.param('vm_rule') in ['negative', 'positive']
        ) if self.param('vm_rule') is not None else None,
    ) if (
        self.param('vm_enforcing') is not None or
        self.param('vm_rule') is not None
    ) else None

    affinity_group.hosts = [
        otypes.Host(id=host_id)
        for host_id in self._host_ids
    ] if self._host_ids is not None else None

    return affinity_group
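# build_entity() above guards the 4.1-only attributes with engine_supported().
# A minimal sketch of such a version check, assuming the helper compares the
# engine's reported product version against the requested minimum (the real
# helper lives in the shared ovirt module_utils and is not part of this section):
from distutils.version import LooseVersion

def engine_supported(connection, version):
    # Read the engine's product version and compare major.minor against the
    # requested minimum version string, e.g. '4.1'.
    engine_ver = connection.system_service().get().product_info.version
    current = '%s.%s' % (engine_ver.major, engine_ver.minor)
    return LooseVersion(current) >= LooseVersion(version)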
def main():
    argument_spec = jctanner.cloud_ovirt.ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        content_type=dict(
            default='data',
            choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock',
                     'hosted_engine_metadata', 'hosted_engine_configuration']
        ),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
        host=dict(default=None),
        wipe_after_delete=dict(type='bool', default=None),
        activate=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    lun = module.params.get('logical_unit')
    host = module.params['host']
    # Fail when the host is specified without a LUN id. The LUN id is needed to
    # identify an existing disk that is already available in the environment.
    if (host and lun is None) or (host and lun.get("id") is None):
        module.fail_json(
            msg="Can not use parameter host ({0!s}) without "
                "specifying the logical_unit id".format(host)
        )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        force_create = False
        vm_service = get_vm_service(connection, module)
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))
        else:
            disk = disks_module.search_entity(search_params=searchable_attributes(module))
            if vm_service and disk:
                # If the disk is not among the VM's disks but was still found, it was
                # found for a template with the same name as the VM, so we should
                # force create the VM disk.
                force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]

        ret = None
        # First take care of creating the disk, if needed:
        if state in ('present', 'detached', 'attached'):
            ret = disks_module.create(
                entity=disk if not force_create else None,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
                force_create=force_create,
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass the ID to the module, so in case we want to detach/attach
            # the disk we have this ID specified for the attach/detach method:
            module.params['id'] = ret['id']

            # Upload the disk image in case it's a new disk or the force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download the disk image in case the file doesn't exist or the force parameter is passed:
            if (
                module.params['download_image_path']
                and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if the disk is of image type:
            if not module.check_mode:
                disk = disks_service.disk_service(module.params['id']).get()
                if disk.storage_type == otypes.DiskStorageType.IMAGE:
                    ret = disks_module.action(
                        action='sparsify',
                        action_condition=lambda d: module.params['sparsify'],
                        wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    )

        # Export the disk as an image to the glance domain:
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" %
                        (module.params.get('name') or module.params.get('id'))
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'absent':
            ret = disks_module.remove()

        # If a VM was passed, attach/detach disks to/from the VM:
        if vm_service:
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        # When the host parameter is specified and the disk is not being
        # removed, refresh the information about the LUN.
        if state != 'absent' and host:
            hosts_service = connection.system_service().hosts_service()
            host_id = get_id_by_name(hosts_service, host)
            disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
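# main() above calls _search_by_lun() to look up a direct-LUN disk by its LUN id.
# The helper is not included in this section; a minimal sketch, assuming the SDK's
# disks_service.list() search syntax and the Disk.lun_storage attribute:
def _search_by_lun(disks_service, lun_id):
    # List only direct-LUN disks and match on the LUN id of their backing storage.
    res = [
        disk for disk in disks_service.list(search='disk_type=lun')
        if disk.lun_storage.id == lun_id
    ]
    return res[0] if res else None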
def build_entity(self):
    return otypes.Host(
        power_management=otypes.PowerManagement(
            enabled=True,
        ),
    )
def build_entity(self):
    return otypes.Host()
def main():
    argument_spec = jctanner.cloud_ovirt.ovirt_full_argument_spec(
        state=dict(
            choices=[
                'present', 'absent', 'maintenance', 'upgraded', 'started',
                'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
            ],
            default='present',
        ),
        name=dict(required=True),
        id=dict(default=None),
        comment=dict(default=None),
        cluster=dict(default=None),
        address=dict(default=None),
        password=dict(default=None, no_log=True),
        public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
        kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
        spm_priority=dict(default=None, type='int'),
        override_iptables=dict(default=None, type='bool'),
        force=dict(default=False, type='bool'),
        timeout=dict(default=600, type='int'),
        override_display=dict(default=None),
        kernel_params=dict(default=None, type='list'),
        hosted_engine=dict(default=None, choices=['deploy', 'undeploy']),
        power_management_enabled=dict(default=None, type='bool'),
        activate=dict(default=True, type='bool'),
        iscsi=dict(default=None, type='dict'),
        check_upgrade=dict(default=True, type='bool'),
        reboot_after_upgrade=dict(default=True, type='bool'),
        vgpu_placement=dict(default=None, choices=['consolidated', 'separated']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'iscsidiscover', ['iscsi']],
            ['state', 'iscsilogin', ['iscsi']]
        ]
    )

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        hosts_module = HostsModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )

        state = module.params['state']
        host = control_state(hosts_module)
        if state == 'present':
            ret = hosts_module.create(
                deploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'deploy'
                ) if module.params.get('hosted_engine') is not None else None,
                activate=module.params['activate'],
                result_state=(hoststate.MAINTENANCE if module.params['activate'] is False else hoststate.UP) if host is None else None,
                fail_condition=hosts_module.failed_state_after_reinstall if host is None else lambda h: False,
            )
            if module.params['activate'] and host is not None:
                ret = hosts_module.action(
                    action='activate',
                    action_condition=lambda h: h.status != hoststate.UP,
                    wait_condition=lambda h: h.status == hoststate.UP,
                    fail_condition=failed_state,
                )
        elif state == 'absent':
            ret = hosts_module.remove()
        elif state == 'maintenance':
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status != hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=failed_state,
            )
            ret = hosts_module.create()
        elif state == 'upgraded':
            result_state = hoststate.MAINTENANCE if host.status == hoststate.MAINTENANCE else hoststate.UP
            events_service = connection.system_service().events_service()
            last_event = events_service.list(max=1)[0]

            if module.params['check_upgrade']:
                hosts_module.action(
                    action='upgrade_check',
                    action_condition=lambda host: not host.update_available,
                    wait_condition=lambda host: host.update_available or (
                        len([
                            event
                            for event in events_service.list(
                                from_=int(last_event.id),
                                search='type=885',
                                # Uncomment when 4.1 is EOL, and remove the cond:
                                # if host.name in event.description
                                # search='type=885 and host.name=%s' % host.name,
                            ) if host.name in event.description
                        ]) > 0
                    ),
                    fail_condition=lambda host: len([
                        event
                        for event in events_service.list(
                            from_=int(last_event.id),
                            search='type=839 or type=887 and host.name=%s' % host.name,
                        )
                    ]) > 0,
                )
                # Set to False, because upgrade_check isn't a 'changing' action:
                hosts_module._changed = False

            ret = hosts_module.action(
                action='upgrade',
                action_condition=lambda h: h.update_available,
                wait_condition=lambda h: h.status == result_state,
                post_action=lambda h: time.sleep(module.params['poll_interval']),
                fail_condition=lambda h: hosts_module.failed_state_after_reinstall(h) or (
                    len([
                        event
                        for event in events_service.list(
                            from_=int(last_event.id),
                            # Fail upgrade if migration fails:
                            # 17: Failed to switch Host to Maintenance mode
                            # 65, 140: Migration failed
                            # 166: No available host was found to migrate VM
                            search='type=65 or type=140 or type=166 or type=17',
                        ) if host.name in event.description
                    ]) > 0
                ),
                reboot=module.params['reboot_after_upgrade'],
            )
        elif state == 'iscsidiscover':
            host_id = get_id_by_name(hosts_service, module.params['name'])
            iscsi_param = module.params['iscsi']
            iscsi_targets = hosts_service.service(host_id).iscsi_discover(
                iscsi=otypes.IscsiDetails(
                    port=int(iscsi_param.get('port', 3260)),
                    username=iscsi_param.get('username'),
                    password=iscsi_param.get('password'),
                    address=iscsi_param.get('address'),
                ),
            )
            ret = {
                'changed': False,
                'id': host_id,
                'iscsi_targets': iscsi_targets,
            }
        elif state == 'iscsilogin':
            host_id = get_id_by_name(hosts_service, module.params['name'])
            iscsi_param = module.params['iscsi']
            ret = hosts_module.action(
                action='iscsi_login',
                iscsi=otypes.IscsiDetails(
                    port=int(iscsi_param.get('port', 3260)),
                    username=iscsi_param.get('username'),
                    password=iscsi_param.get('password'),
                    address=iscsi_param.get('address'),
                    target=iscsi_param.get('target'),
                ),
            )
        elif state == 'started':
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status == hoststate.DOWN,
                wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
                fail_condition=hosts_module.failed_state_after_reinstall,
                fence_type='start',
            )
        elif state == 'stopped':
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )
            ret = hosts_module.action(
                action='fence',
                action_condition=lambda h: h.status != hoststate.DOWN,
                wait_condition=lambda h: h.status == hoststate.DOWN if module.params['wait'] else True,
                fail_condition=failed_state,
                fence_type='stop',
            )
        elif state == 'restarted':
            ret = hosts_module.action(
                action='fence',
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=hosts_module.failed_state_after_reinstall,
                fence_type='restart',
            )
        elif state == 'reinstalled':
            # Deactivate the host if it is not in maintenance:
            hosts_module.action(
                action='deactivate',
                action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
                wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
                fail_condition=failed_state,
            )

            # Reinstall the host:
            hosts_module.action(
                action='install',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                post_action=hosts_module.post_reinstall,
                wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
                fail_condition=hosts_module.failed_state_after_reinstall,
                host=otypes.Host(
                    override_iptables=module.params['override_iptables'],
                ) if module.params['override_iptables'] else None,
                root_password=module.params['password'],
                ssh=otypes.Ssh(
                    authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
                ) if module.params['public_key'] else None,
                deploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'deploy'
                ) if module.params.get('hosted_engine') is not None else None,
                undeploy_hosted_engine=(
                    module.params.get('hosted_engine') == 'undeploy'
                ) if module.params.get('hosted_engine') is not None else None,
            )

            # Activate the host after reinstall:
            ret = hosts_module.action(
                action='activate',
                action_condition=lambda h: h.status == hoststate.MAINTENANCE,
                wait_condition=lambda h: h.status == hoststate.UP,
                fail_condition=failed_state,
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
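# Both the iscsidiscover/iscsilogin branches above and the disk module earlier
# resolve an entity name to an id with get_id_by_name(). The shared helper is
# not included in this section; a minimal sketch, assuming a search-by-name
# lookup against the given collection service:
def get_id_by_name(service, name):
    # Search the collection for an entity with the exact name and return its id.
    matches = [e for e in service.list(search='name=%s' % name) if e.name == name]
    if not matches:
        raise Exception("Entity '%s' was not found." % name)
    return matches[0].id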
def build_entity(self):
    return otypes.InstanceType(
        id=self.param('id'),
        name=self.param('name'),
        console=(
            otypes.Console(enabled=self.param('serial_console'))
        ) if self.param('serial_console') is not None else None,
        usb=(
            otypes.Usb(enabled=self.param('usb_support'))
        ) if self.param('usb_support') is not None else None,
        high_availability=otypes.HighAvailability(
            enabled=self.param('high_availability'),
            priority=self.param('high_availability_priority'),
        ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
        cpu=otypes.Cpu(
            topology=otypes.CpuTopology(
                cores=self.param('cpu_cores'),
                sockets=self.param('cpu_sockets'),
                threads=self.param('cpu_threads'),
            ) if any((
                self.param('cpu_cores'),
                self.param('cpu_sockets'),
                self.param('cpu_threads')
            )) else None,
            cpu_tune=otypes.CpuTune(
                vcpu_pins=[
                    otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu']))
                    for pin in self.param('cpu_pinning')
                ],
            ) if self.param('cpu_pinning') else None,
            mode=otypes.CpuMode(self.param('cpu_mode')) if self.param('cpu_mode') else None,
        ) if any((
            self.param('cpu_cores'),
            self.param('cpu_sockets'),
            self.param('cpu_threads'),
            self.param('cpu_mode'),
            self.param('cpu_pinning')
        )) else None,
        os=otypes.OperatingSystem(
            type=self.param('operating_system'),
            boot=otypes.Boot(
                devices=[
                    otypes.BootDevice(dev) for dev in self.param('boot_devices')
                ],
            ) if self.param('boot_devices') else None
        ),
        rng_device=otypes.RngDevice(
            source=otypes.RngSource(self.param('rng_device')),
            rate=otypes.Rate(
                bytes=self.param('rng_bytes'),
                period=self.param('rng_period')
            )
        ) if self.param('rng_device') else None,
        memory=convert_to_bytes(
            self.param('memory')
        ) if self.param('memory') else None,
        virtio_scsi=otypes.VirtioScsi(
            enabled=self.param('virtio_scsi')
        ) if self.param('virtio_scsi') else None,
        memory_policy=otypes.MemoryPolicy(
            guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
            ballooning=self.param('ballooning_enabled'),
            max=convert_to_bytes(self.param('memory_max')),
        ) if any((
            self.param('memory_guaranteed'),
            self.param('ballooning_enabled') is not None,
            self.param('memory_max')
        )) else None,
        description=self.param('description'),
        placement_policy=otypes.VmPlacementPolicy(
            affinity=otypes.VmAffinity(self.param('placement_policy')),
            hosts=[
                otypes.Host(name=self.param('host')),
            ] if self.param('host') else None,
        ) if self.param('placement_policy') else None,
        soundcard_enabled=self.param('soundcard_enabled'),
        display=otypes.Display(
            smartcard_enabled=self.param('smartcard_enabled')
        ) if self.param('smartcard_enabled') is not None else None,
        io=otypes.Io(
            threads=self.param('io_threads'),
        ) if self.param('io_threads') is not None else None,
    )