def restore_snapshot(module, vm_service, snapshots_service):
    changed = False
    snapshot_service = snapshots_service.snapshot_service(
        module.params['snapshot_id'])
    snapshot = get_entity(snapshot_service)
    if snapshot is None:
        raise Exception(
            "Snapshot with id '%s' doesn't exist" % module.params['snapshot_id']
        )

    if snapshot.snapshot_status != otypes.SnapshotStatus.IN_PREVIEW:
        if not module.check_mode:
            snapshot_service.restore(
                restore_memory=module.params.get('use_memory'),
            )
        changed = True
    else:
        if not module.check_mode:
            vm_service.commit_snapshot()
        changed = True

    if changed:
        wait(
            service=snapshot_service,
            condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
            wait=module.params['wait'],
            timeout=module.params['timeout'],
        )
    return {
        'changed': changed,
        'id': snapshot.id if snapshot else None,
        'snapshot': get_dict_of_struct(snapshot),
    }
def create_snapshot(module, vm_service, snapshots_service):
    changed = False
    snapshot = get_entity(
        snapshots_service.snapshot_service(module.params['snapshot_id'])
    )
    if snapshot is None:
        if not module.check_mode:
            snapshot = snapshots_service.add(
                otypes.Snapshot(
                    description=module.params.get('description'),
                    persist_memorystate=module.params.get('use_memory'),
                )
            )
        changed = True
        wait(
            service=snapshots_service.snapshot_service(snapshot.id),
            condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
            wait=module.params['wait'],
            timeout=module.params['timeout'],
        )
    return {
        'changed': changed,
        'id': snapshot.id,
        'snapshot': get_dict_of_struct(snapshot),
    }
def create_snapshot(module, vm_service, snapshots_service, connection):
    changed = False
    snapshot = get_entity(
        snapshots_service.snapshot_service(module.params['snapshot_id'])
    )
    if snapshot is None:
        if not module.check_mode:
            disk_attachments_id = set(
                get_disk_attachment(
                    disk,
                    vm_service.disk_attachments_service().list(),
                    connection
                ).id
                for disk in module.params.get('disks')
            ) if module.params.get('disks') else None

            snapshot = snapshots_service.add(
                otypes.Snapshot(
                    description=module.params.get('description'),
                    persist_memorystate=module.params.get('use_memory'),
                    disk_attachments=[
                        otypes.DiskAttachment(id=da_id)
                        for da_id in disk_attachments_id
                    ] if disk_attachments_id else None,
                )
            )
        changed = True
        wait(
            service=snapshots_service.snapshot_service(snapshot.id),
            condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
            wait=module.params['wait'],
            timeout=module.params['timeout'],
        )
    return {
        'changed': changed,
        'id': snapshot.id,
        'snapshot': get_dict_of_struct(snapshot),
    }
def main():
    argument_spec = ovirt_full_argument_spec(
        id=dict(default=None),
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        template=dict(default=None),
        cluster=dict(default=None),
        description=dict(default=None),
        vm=dict(default=None, type='dict'),
        comment=dict(default=None),
        vm_per_user=dict(default=None, type='int'),
        prestarted=dict(default=None, type='int'),
        vm_count=dict(default=None, type='int'),
        type=dict(default=None, choices=['automatic', 'manual']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)
    check_params(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vm_pools_service = connection.system_service().vm_pools_service()
        vm_pools_module = VmPoolsModule(
            connection=connection,
            module=module,
            service=vm_pools_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = vm_pools_module.create()

            # Wait for all VM pool VMs to be created:
            if module.params['wait']:
                vms_service = connection.system_service().vms_service()
                for vm in vms_service.list(search='pool=%s' % module.params['name']):
                    wait(
                        service=vms_service.service(vm.id),
                        condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
                        timeout=module.params['timeout'],
                    )
        elif state == 'absent':
            ret = vm_pools_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def remove_snapshot(module, vm_service, snapshots_service, snapshot_id=None):
    changed = False
    if not snapshot_id:
        snapshot_id = module.params['snapshot_id']
    snapshot = get_entity(
        snapshots_service.snapshot_service(snapshot_id)
    )

    if snapshot:
        snapshot_service = snapshots_service.snapshot_service(snapshot.id)
        if not module.check_mode:
            snapshot_service.remove()
        changed = True
        wait(
            service=snapshot_service,
            condition=lambda snapshot: snapshot is None,
            wait=module.params['wait'],
            timeout=module.params['timeout'],
        )
    return {
        'changed': changed,
        'id': snapshot.id if snapshot else None,
        'snapshot': get_dict_of_struct(snapshot),
    }
def control_state(host_module):
    host = host_module.search_entity()
    if host is None:
        return

    state = host_module._module.params['state']
    host_service = host_module._service.service(host.id)
    if failed_state(host):
        # In case host is in INSTALL_FAILED status, we can reinstall it:
        if hoststate.INSTALL_FAILED == host.status and state != 'reinstalled':
            raise Exception(
                "Not possible to manage host '%s' in state '%s'." % (host.name, host.status)
            )
    elif host.status in [
        hoststate.REBOOT,
        hoststate.CONNECTING,
        hoststate.INITIALIZING,
        hoststate.INSTALLING,
        hoststate.INSTALLING_OS,
    ]:
        wait(
            service=host_service,
            condition=lambda host: host.status == hoststate.UP,
            fail_condition=failed_state,
        )
    elif host.status == hoststate.PREPARING_FOR_MAINTENANCE:
        wait(
            service=host_service,
            condition=lambda host: host.status == hoststate.MAINTENANCE,
            fail_condition=failed_state,
        )

    return host
def _wait_for_UP(self, vm_service):
    wait(
        service=vm_service,
        condition=lambda vm: vm.status == otypes.VmStatus.UP,
        wait=self._module.params['wait'],
        timeout=self._module.params['timeout'],
    )
def _wait_for_UP(self, vm_service):
    wait(
        service=vm_service,
        condition=lambda vm: vm.status == otypes.VmStatus.UP,
        wait=self.param('wait'),
        timeout=self.param('timeout'),
    )
def transfer(connection, module, direction, transfer_func):
    transfers_service = connection.system_service().image_transfers_service()
    transfer = transfers_service.add(
        otypes.ImageTransfer(
            image=otypes.Image(
                id=module.params['id'],
            ),
            direction=direction,
        )
    )
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    try:
        # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
        # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
        while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        proxy_url = urlparse(transfer.proxy_url)
        context = ssl.create_default_context()
        auth = module.params['auth']
        if auth.get('insecure'):
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        elif auth.get('ca_file'):
            context.load_verify_locations(cafile=auth.get('ca_file'))

        proxy_connection = HTTPSConnection(
            proxy_url.hostname,
            proxy_url.port,
            context=context,
        )

        transfer_func(
            transfer_service,
            proxy_connection,
            proxy_url,
            transfer.signed_ticket
        )
        return True
    finally:
        transfer_service.finalize()
        while transfer.phase in [
            otypes.ImageTransferPhase.TRANSFERRING,
            otypes.ImageTransferPhase.FINALIZING_SUCCESS,
        ]:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        if transfer.phase in [
            otypes.ImageTransferPhase.UNKNOWN,
            otypes.ImageTransferPhase.FINISHED_FAILURE,
            otypes.ImageTransferPhase.FINALIZING_FAILURE,
            otypes.ImageTransferPhase.CANCELLED,
        ]:
            raise Exception(
                "Error occurred while uploading image. The transfer is in %s" % transfer.phase
            )

        if module.params.get('logical_unit'):
            disks_service = connection.system_service().disks_service()
            wait(
                service=disks_service.service(module.params['id']),
                condition=lambda d: d.status == otypes.DiskStatus.OK,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )
def post_reinstall(self, host):
    wait(
        service=self._service.service(host.id),
        condition=lambda h: h.status != hoststate.MAINTENANCE,
        fail_condition=failed_state,
        wait=self.param('wait'),
        timeout=self.param('timeout'),
    )
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        template=dict(default=None),
        cluster=dict(default=None),
        description=dict(default=None),
        comment=dict(default=None),
        vm_per_user=dict(default=None, type='int'),
        prestarted=dict(default=None, type='int'),
        vm_count=dict(default=None, type='int'),
        type=dict(default=None, choices=['automatic', 'manual']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)
    check_params(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vm_pools_service = connection.system_service().vm_pools_service()
        vm_pools_module = VmPoolsModule(
            connection=connection,
            module=module,
            service=vm_pools_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = vm_pools_module.create()

            # Wait for all VM pool VMs to be created:
            if module.params['wait']:
                vms_service = connection.system_service().vms_service()
                for vm in vms_service.list(search='pool=%s' % module.params['name']):
                    wait(
                        service=vms_service.service(vm.id),
                        condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
                        timeout=module.params['timeout'],
                    )
        elif state == 'absent':
            ret = vm_pools_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def control_state(sd_module):
    sd = sd_module.search_entity()
    if sd is None:
        return

    sd_service = sd_module._service.service(sd.id)
    if sd.status == sdstate.LOCKED:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status != sdstate.LOCKED,
            fail_condition=failed_state,
        )

    if failed_state(sd):
        raise Exception("Not possible to manage storage domain '%s'." % sd.name)
    elif sd.status == sdstate.ACTIVATING:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            fail_condition=failed_state,
        )
    elif sd.status == sdstate.DETACHING:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.UNATTACHED,
            fail_condition=failed_state,
        )
    elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.MAINTENANCE,
            fail_condition=failed_state,
        )
def _maintenance(self, storage_domain):
    attached_sd_service = self._attached_sd_service(storage_domain)
    attached_sd = get_entity(attached_sd_service)

    if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
        if not self._module.check_mode:
            attached_sd_service.deactivate()
        self.changed = True

        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.MAINTENANCE,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
def post_create_check(self, sd_id):
    storage_domain = self._service.service(sd_id).get()
    self._service = self._attached_sds_service()

    # If storage domain isn't attached, attach it:
    attached_sd_service = self._service.service(storage_domain.id)
    if get_entity(attached_sd_service) is None:
        self._service.add(
            otypes.StorageDomain(
                id=storage_domain.id,
            ),
        )
        self.changed = True
        # Wait until the storage domain is active:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
        )
def _unattach(self, storage_domain):
    attached_sd_service = self._attached_sd_service(storage_domain)
    attached_sd = get_entity(attached_sd_service)

    if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
        if not self._module.check_mode:
            # Detach the storage domain:
            attached_sd_service.remove()
        self.changed = True
        # Wait until storage domain is detached:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd is None,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
def _maintenance(self, storage_domain):
    attached_sds_service = self._attached_sds_service()
    if attached_sds_service is None:
        return

    attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
    attached_sd = attached_sd_service.get()

    if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
        if not self._module.check_mode:
            attached_sd_service.deactivate()
        self.changed = True

        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.MAINTENANCE,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
        )
def post_create_check(self, sd_id):
    storage_domain = self._service.service(sd_id).get()
    self._service = self._attached_sds_service()

    # If storage domain isn't attached, attach it:
    attached_sd_service = self._service.service(storage_domain.id)
    if attached_sd_service.get() is None:
        self._service.add(
            otypes.StorageDomain(
                id=storage_domain.id,
            ),
        )
        self.changed = True
        # Wait until the storage domain is active:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
        )
def _unattach(self, storage_domain):
    attached_sds_service = self._attached_sds_service()
    if attached_sds_service is None:
        return

    attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
    attached_sd = attached_sd_service.get()

    if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
        if not self._module.check_mode:
            # Detach the storage domain:
            attached_sd_service.remove()
        self.changed = True
        # Wait until storage domain is detached:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd is None,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
        )
def post_create_check(self, sd_id):
    storage_domain = self._service.service(sd_id).get()
    dc_name = self.param('data_center')
    if not dc_name:
        # Find the DC where the storage resides:
        dc_name = self._find_attached_datacenter_name(storage_domain.name)
    self._service = self._attached_sds_service(dc_name)

    # If storage domain isn't attached, attach it:
    attached_sd_service = self._service.service(storage_domain.id)
    if get_entity(attached_sd_service) is None:
        self._service.add(
            otypes.StorageDomain(
                id=storage_domain.id,
            ),
        )
        self.changed = True
        # Wait until the storage domain is active:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
def control_state(host_module):
    host = host_module.search_entity()
    if host is None:
        return

    state = host_module._module.params['state']
    host_service = host_module._service.service(host.id)
    if failed_state(host):
        # In case host is in INSTALL_FAILED status, we can reinstall it:
        if hoststate.INSTALL_FAILED == host.status and state != 'reinstalled':
            raise Exception(
                "Not possible to manage host '%s' in state '%s'." % (
                    host.name,
                    host.status
                )
            )
    elif host.status in [
        hoststate.REBOOT,
        hoststate.CONNECTING,
        hoststate.INITIALIZING,
        hoststate.INSTALLING,
        hoststate.INSTALLING_OS,
    ]:
        wait(
            service=host_service,
            condition=lambda host: host.status == hoststate.UP,
            fail_condition=failed_state,
        )
    elif host.status == hoststate.PREPARING_FOR_MAINTENANCE:
        wait(
            service=host_service,
            condition=lambda host: host.status == hoststate.MAINTENANCE,
            fail_condition=failed_state,
        )

    return host
def post_create_check(self, sd_id):
    storage_domain = self._service.service(sd_id).get()
    dc_name = self.param('data_center')
    if not dc_name:
        # Find the DC where the storage resides:
        dc_name = self._find_attached_datacenter_name(storage_domain.name)
    self._service = self._attached_sds_service(dc_name)

    # If storage domain isn't attached, attach it:
    attached_sd_service = self._service.service(storage_domain.id)
    if get_entity(attached_sd_service) is None:
        self._service.add(
            otypes.StorageDomain(
                id=storage_domain.id,
            ),
        )
        self.changed = True
        # Wait until the storage domain is active:
        wait(
            service=attached_sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
def control_state(vm, vms_service, module):
    if vm is None:
        return

    force = module.params['force']
    state = module.params['state']

    vm_service = vms_service.vm_service(vm.id)
    if vm.status == otypes.VmStatus.IMAGE_LOCKED:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
        )
    elif vm.status == otypes.VmStatus.SAVING_STATE:
        # Result state is SUSPENDED, we should wait to be suspended:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
        )
    elif (
        vm.status == otypes.VmStatus.UNASSIGNED or
        vm.status == otypes.VmStatus.UNKNOWN
    ):
        # Invalid states:
        module.fail_json(
            msg="Not possible to control VM, if it's in '{}' status".format(vm.status)
        )
    elif vm.status == otypes.VmStatus.POWERING_DOWN:
        if (force and state == 'stopped') or state == 'absent':
            vm_service.stop()
            wait(
                service=vm_service,
                condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
            )
        else:
            # If VM is powering down, wait to be DOWN or UP.
            # VM can end in UP state in case there is no GA
            # or ACPI on the VM or shutdown operation crashed:
            wait(
                service=vm_service,
                condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
            )
def control_state(sd_module):
    sd = sd_module.search_entity()
    if sd is None:
        return

    sd_service = sd_module._service.service(sd.id)

    # In the case of no status returned, it's an attached storage domain.
    # Redetermine the corresponding service and entity:
    if sd.status is None:
        sd_service = sd_module._attached_sd_service(sd)
        sd = get_entity(sd_service)

    if sd.status == sdstate.LOCKED:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status != sdstate.LOCKED,
            fail_condition=failed_state,
        )

    if failed_state(sd):
        raise Exception("Not possible to manage storage domain '%s'." % sd.name)
    elif sd.status == sdstate.ACTIVATING:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.ACTIVE,
            fail_condition=failed_state,
        )
    elif sd.status == sdstate.DETACHING:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.UNATTACHED,
            fail_condition=failed_state,
        )
    elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
        wait(
            service=sd_service,
            condition=lambda sd: sd.status == sdstate.MAINTENANCE,
            fail_condition=failed_state,
        )
def wait_for_down(self, vm):
    """
    This function will first wait for the VM to reach the DOWN status.
    Then it will find the active snapshot and wait until its state is OK for
    stateless VMs and until the stateless snapshot is removed.
    """
    vm_service = self._service.vm_service(vm.id)
    wait(
        service=vm_service,
        condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
        wait=self.param('wait'),
        timeout=self.param('timeout'),
    )
    if vm.stateless:
        snapshots_service = vm_service.snapshots_service()
        snapshots = snapshots_service.list()
        snap_active = [
            snap for snap in snapshots
            if snap.snapshot_type == otypes.SnapshotType.ACTIVE
        ][0]
        snap_stateless = [
            snap for snap in snapshots
            if snap.snapshot_type == otypes.SnapshotType.STATELESS
        ]
        # Stateless snapshot may be already removed:
        if snap_stateless:
            wait(
                service=snapshots_service.snapshot_service(snap_stateless[0].id),
                condition=lambda snap: snap is None,
                wait=self.param('wait'),
                timeout=self.param('timeout'),
            )
        wait(
            service=snapshots_service.snapshot_service(snap_active.id),
            condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
    return True
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if module._name == 'ovirt_disks':
        module.deprecate("The 'ovirt_disks' module is being renamed 'ovirt_disk'", version=2.8)

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        lun = module.params.get('logical_unit')
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))

        ret = None
        # First take care of creating the VM, if needed:
        if state in ('present', 'detached', 'attached'):
            ret = disks_module.create(
                entity=disk,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want to detach/attach the disk
            # we have this ID specified to the attach/detach method:
            module.params['id'] = ret['id'] if disk is None else disk.id

            # Upload disk image in case it's a new disk or the force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download disk image in case the file doesn't exist or the force parameter is passed:
            if (
                module.params['download_image_path'] and
                (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if disk is of image type:
            disk = disks_service.disk_service(module.params['id']).get()
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='sparsify',
                    action_condition=lambda d: module.params['sparsify'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                )

        # Export disk as image to glance domain:
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" %
                        (module.params.get('name') or module.params.get('id'))
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed, attach/detach disks to/from the VM:
        if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and state != 'absent':
            vms_service = connection.system_service().vms_service()

            # If `vm_id` isn't specified, find VM by name:
            vm_id = module.params['vm_id']
            if vm_id is None:
                vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)

            if vm_id is None:
                module.fail_json(msg="VM doesn't exist, please create it first.")

            disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list'),
        profile=dict(default=None),
        format=dict(default=None, choices=['raw', 'cow']),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        connection = create_connection(module.params.pop('auth'))
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        lun = module.params.get('logical_unit')
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))

        ret = None
        # First take care of creating the VM, if needed:
        if state == 'present' or state == 'detached' or state == 'attached':
            ret = disks_module.create(
                entity=disk,
                result_state=otypes.DiskStatus.OK if lun is None else None,
            )
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want to detach/attach the disk
            # we have this ID specified to the attach/detach method:
            module.params['id'] = ret['id'] if disk is None else disk.id
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed, attach/detach disks to/from the VM:
        if (module.params['vm_id'] or module.params['vm_name']) and state != 'absent':
            vms_service = connection.system_service().vms_service()

            # If `vm_id` isn't specified, find VM by name:
            vm_id = module.params['vm_id']
            if vm_id is None:
                vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)

            if vm_id is None:
                module.fail_json(msg="VM doesn't exist, please create it first.")

            disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=False)
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list'),
        profile=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        lun = module.params.get('logical_unit')
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))

        ret = None
        # First take care of creating the VM, if needed:
        if state == 'present' or state == 'detached' or state == 'attached':
            ret = disks_module.create(
                entity=disk,
                result_state=otypes.DiskStatus.OK if lun is None else None,
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want to detach/attach the disk
            # we have this ID specified to the attach/detach method:
            module.params['id'] = ret['id'] if disk is None else disk.id

            # Upload disk image in case it's a new disk or the force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download disk image in case the file doesn't exist or the force parameter is passed:
            if (
                module.params['download_image_path'] and
                (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if disk is of image type:
            disk = disks_service.disk_service(module.params['id']).get()
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='sparsify',
                    action_condition=lambda d: module.params['sparsify'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                )
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed, attach/detach disks to/from the VM:
        if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and state != 'absent':
            vms_service = connection.system_service().vms_service()

            # If `vm_id` isn't specified, find VM by name:
            vm_id = module.params['vm_id']
            if vm_id is None:
                vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)

            if vm_id is None:
                module.fail_json(msg="VM doesn't exist, please create it first.")

            disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def upload_disk_image(connection, module):
    size = os.path.getsize(module.params['image_path'])
    transfers_service = connection.system_service().image_transfers_service()
    transfer = transfers_service.add(
        otypes.ImageTransfer(
            image=otypes.Image(
                id=module.params['id'],
            )
        )
    )
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    try:
        # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
        # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
        while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        # Set needed headers for uploading:
        upload_headers = {
            'Authorization': transfer.signed_ticket,
        }

        proxy_url = urlparse(transfer.proxy_url)
        context = ssl.create_default_context()
        auth = module.params['auth']
        if auth.get('insecure'):
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        elif auth.get('ca_file'):
            context.load_verify_locations(cafile=auth.get('ca_file'))

        proxy_connection = HTTPSConnection(
            proxy_url.hostname,
            proxy_url.port,
            context=context,
        )

        with open(module.params['image_path'], "rb") as disk:
            chunk_size = 1024 * 1024 * 8
            pos = 0
            while pos < size:
                transfer_service.extend()
                upload_headers['Content-Range'] = "bytes %d-%d/%d" % (pos, min(pos + chunk_size, size) - 1, size)
                proxy_connection.request(
                    'PUT',
                    proxy_url.path,
                    disk.read(chunk_size),
                    headers=upload_headers,
                )
                r = proxy_connection.getresponse()
                if r.status >= 400:
                    raise Exception("Failed to upload disk image.")
                pos += chunk_size
    finally:
        transfer_service.finalize()
        while transfer.phase in [
            otypes.ImageTransferPhase.TRANSFERRING,
            otypes.ImageTransferPhase.FINALIZING_SUCCESS,
        ]:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        if transfer.phase in [
            otypes.ImageTransferPhase.UNKNOWN,
            otypes.ImageTransferPhase.FINISHED_FAILURE,
            otypes.ImageTransferPhase.FINALIZING_FAILURE,
            otypes.ImageTransferPhase.CANCELLED,
        ]:
            raise Exception(
                "Error occurred while uploading image. The transfer is in %s" % transfer.phase
            )

        if module.params.get('logical_unit'):
            disks_service = connection.system_service().disks_service()
            wait(
                service=disks_service.service(module.params['id']),
                condition=lambda d: d.status == otypes.DiskStatus.OK,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )

    return True
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        content_type=dict(
            default='data',
            choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock',
                     'hosted_engine_metadata', 'hosted_engine_configuration']
        ),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
        host=dict(default=None),
        wipe_after_delete=dict(type='bool', default=None),
        activate=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    lun = module.params.get('logical_unit')
    host = module.params['host']
    # Fail when host is specified without the LUN id. The LUN id is needed to identify
    # an existing disk if it is already available in the environment.
    if (host and lun is None) or (host and lun.get("id") is None):
        module.fail_json(
            msg="Can not use parameter host ({0!s}) without "
                "specifying the logical_unit id".format(host)
        )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        force_create = False
        vm_service = get_vm_service(connection, module)
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))
        else:
            disk = disks_module.search_entity(search_params=searchable_attributes(module))
            if vm_service and disk:
                # If the disk is not among the VM's disks but was still found, it was found
                # for a template with the same name as the VM, so we should force create the VM disk:
                force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]

        ret = None
        # First take care of creating the VM, if needed:
        if state in ('present', 'detached', 'attached'):
            # Always activate the disk when it's being created:
            if vm_service is not None and disk is None:
                module.params['activate'] = True
            ret = disks_module.create(
                entity=disk if not force_create else None,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
                force_create=force_create,
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want to detach/attach the disk
            # we have this ID specified to the attach/detach method:
            module.params['id'] = ret['id']

            # Upload disk image in case it's a new disk or the force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download disk image in case the file doesn't exist or the force parameter is passed:
            if (
                module.params['download_image_path'] and
                (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if disk is of image type:
            if not module.check_mode:
                disk = disks_service.disk_service(module.params['id']).get()
                if disk.storage_type == otypes.DiskStorageType.IMAGE:
                    ret = disks_module.action(
                        action='sparsify',
                        action_condition=lambda d: module.params['sparsify'],
                        wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    )

        # Export disk as image to glance domain:
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" %
                        (module.params.get('name') or module.params.get('id'))
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'imported':
            glance_service = connection.system_service().openstack_image_providers_service()
            image_provider = search_by_name(glance_service, module.params['image_provider'])
            images_service = glance_service.service(image_provider.id).images_service()
            entity_id = get_id_by_name(images_service, module.params['name'])
            images_service.service(entity_id).import_(
                storage_domain=otypes.StorageDomain(
                    name=module.params['storage_domain']
                ) if module.params['storage_domain'] else None,
                disk=otypes.Disk(
                    name=module.params['name']
                ),
                import_as_template=False,
            )
            # Wait for disk to appear in system:
            disk = disks_module.wait_for_import(
                condition=lambda t: t.status == otypes.DiskStatus.OK
            )
            ret = disks_module.create(result_state=otypes.DiskStatus.OK)
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed, attach/detach disks to/from the VM:
        if vm_service:
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        # When the host parameter is specified and the disk is not being
        # removed, refresh the information about the LUN.
        if state != 'absent' and host:
            hosts_service = connection.system_service().hosts_service()
            host_id = get_id_by_name(hosts_service, host)
            disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def transfer(connection, module, direction, transfer_func):
    transfers_service = connection.system_service().image_transfers_service()
    transfer = transfers_service.add(
        otypes.ImageTransfer(
            image=otypes.Image(
                id=module.params['id'],
            ),
            direction=direction,
        )
    )
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    try:
        # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
        # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
        while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        proxy_url = urlparse(transfer.proxy_url)
        context = ssl.create_default_context()
        auth = module.params['auth']
        if auth.get('insecure'):
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        elif auth.get('ca_file'):
            context.load_verify_locations(cafile=auth.get('ca_file'))

        proxy_connection = HTTPSConnection(
            proxy_url.hostname,
            proxy_url.port,
            context=context,
        )

        transfer_func(
            transfer_service,
            proxy_connection,
            proxy_url,
            transfer.signed_ticket
        )
        return True
    finally:
        transfer_service.finalize()
        while transfer.phase in [
            otypes.ImageTransferPhase.TRANSFERRING,
            otypes.ImageTransferPhase.FINALIZING_SUCCESS,
        ]:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        if transfer.phase in [
            otypes.ImageTransferPhase.UNKNOWN,
            otypes.ImageTransferPhase.FINISHED_FAILURE,
            otypes.ImageTransferPhase.FINALIZING_FAILURE,
            otypes.ImageTransferPhase.CANCELLED,
        ]:
            raise Exception(
                "Error occurred while uploading image. The transfer is in %s" % transfer.phase
            )

        if module.params.get('logical_unit'):
            disks_service = connection.system_service().disks_service()
            wait(
                service=disks_service.service(module.params['id']),
                condition=lambda d: d.status == otypes.DiskStatus.OK,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )