def attach_disk(self, data_vm):
    """Snapshot ``data_vm`` and attach the snapshot's disks to the agent VM.

    Creates a memory-less snapshot of the data VM, waits for it to be
    ready, attaches every disk of the snapshot to the agent virtual
    machine, and stores the attachment handles on ``self`` so a later
    step can detach/remove them.

    Side effects: sets ``self.attachments``, ``self.attachments_service``,
    ``self.snap_description`` and ``self.snap_service``.
    """
    # Find the services that manage the data and agent virtual machines:
    data_vm_service = self.vms_service.vm_service(data_vm.id)
    snap_description = '%s-bk-%s' % (data_vm.name, uuid.uuid4())
    self.event(
        data_vm, types.LogSeverity.NORMAL,
        'Backup of virtual machine \'{}\' using snapshot \'{}\' is '
        'starting.'.format(data_vm.name, snap_description))
    # Send the request to create the snapshot. Note that this will return
    # before the snapshot is completely created, so we will later need to
    # wait till the snapshot is completely created.
    # The snapshot will not include memory. Change to True the parameter
    # persist_memorystate to get it (in that case the VM will be paused for a while).
    snaps_service = data_vm_service.snapshots_service()
    snap = snaps_service.add(snapshot=types.Snapshot(
        description=snap_description,
        persist_memorystate=False,
    ), )
    logging.info(
        'Sent request to create snapshot \'%s\', the id is \'%s\'.',
        snap.description, snap.id,
    )
    # Poll and wait till the status of the snapshot is 'ok', which means
    # that it is completely created:
    snap_service = snaps_service.snapshot_service(snap.id)
    while snap.snapshot_status != types.SnapshotStatus.OK:
        time.sleep(1)
        snap = snap_service.get()
    logging.info('The snapshot is now complete.')
    # Extra settle time after the snapshot reports OK before listing its
    # disks. NOTE(review): fixed 15s delay — presumably works around the
    # engine lagging behind the status; confirm whether it is still needed.
    time.sleep(15)
    # Retrieve the descriptions of the disks of the snapshot:
    snap_disks_service = snap_service.disks_service()
    snap_disks = snap_disks_service.list()
    # Attach all the disks of the snapshot to the agent virtual machine, and
    # save the resulting disk attachments in a list so that we can later
    # detach them easily:
    attachments_service = self.agent_vm_service.disk_attachments_service()
    attachments = []
    for snap_disk in snap_disks:
        attachment = attachments_service.add(
            attachment=types.DiskAttachment(
                disk=types.Disk(
                    id=snap_disk.id,
                    snapshot=types.Snapshot(id=snap.id, ),
                ),
                active=True,
                bootable=False,
                interface=types.DiskInterface.VIRTIO,
            ),
        )
        attachments.append(attachment)
        logging.info(
            'Attached disk \'%s\' to the agent virtual machine.',
            attachment.disk.id,
        )
    # Now the disks are attached to the virtual agent virtual machine, we
    # can then ask that virtual machine to perform the backup. Doing that
    # requires a mechanism to talk to the backup software that runs inside the
    # agent virtual machine. That is outside of the scope of the SDK. But if
    # the guest agent is installed in the virtual machine then we can
    # provide useful information, like the identifiers of the disks that have
    # just been attached.
    for attachment in attachments:
        if attachment.logical_name is not None:
            logging.info(
                'Logical name for disk \'%s\' is \'%s\'.',
                attachment.disk.id,
                # BUGFIX: was `attachment.logicalname`, which does not exist
                # on DiskAttachment and raised AttributeError whenever the
                # guest agent reported a logical name.
                attachment.logical_name,
            )
        else:
            logging.info(
                'The logical name for disk \'%s\' isn\'t available. Is the '
                'guest agent installed?',
                attachment.disk.id,
            )
    # We need to sleep here because the system needs time to scan the drives
    time.sleep(self.config["attach_wait_seconds"])
    self.attachments = attachments
    self.attachments_service = attachments_service
    self.snap_description = snap_description
    self.snap_service = snap_service
# This example will connect to the server and create a new `floating` # disk, one that isn't attached to any virtual machine. # Then using transfer service it will transfer disk data from local # image to the newly created disk in server. progress("Connecting...") connection = common.create_connection(args) progress("Creating disk...") disks_service = connection.system_service().disks_service() disk = disks_service.add(disk=types.Disk( name=disk_info["name"], content_type=disk_info["content_type"], description='Uploaded disk', format=disk_info["format"], initial_size=disk_info["initial_size"], provisioned_size=disk_info["provisioned_size"], sparse=args.disk_sparse, backup=types.DiskBackup.INCREMENTAL if args.enable_backup else None, storage_domains=[types.StorageDomain(name=args.sd_name)])) # Wait till the disk is up, as the transfer can't start if the # disk is locked: disk_service = disks_service.disk_service(disk.id) while True: time.sleep(1) disk = disk_service.get() if disk.status == types.DiskStatus.OK: break progress("Disk ID: %s" % disk.id)
# Determine the volume format if props['volume-format'] == 'COW': disk_format = types.DiskFormat.COW else: disk_format = types.DiskFormat.RAW # Add the disk: disk = disks_service.add( disk=types.Disk( id=props['diskId'], name=props['disk-alias'], description=props['description'], format=disk_format, provisioned_size=int(props['capacity']) * 2**30, initial_size=int(props['populatedSize']), storage_domains=[ types.StorageDomain( name='scsi' ) ] ) ) # Wait till the disk is up, as the transfer can't start if the # disk is locked: disk_service = disks_service.disk_service(disk.id) while disk_service.get().status != types.DiskStatus.OK: time.sleep(5) # Add a new image transfer:
def attach_disk(params, vm_name):
    """Attach a new or existing disk to the VM named ``vm_name``.

    ``params`` keys used: ``format`` ('raw'/'cow'), ``interface``
    ('virtio'), ``bootable``, ``active``, and either ``existing_disk`` +
    ``id`` (attach an existing disk) or ``name`` / ``description`` /
    ``provisioned_size`` (GiB) / ``storage_domains`` (create a new disk).

    On an unsupported format/interface an error string is returned, which
    matches the original caller contract.
    """
    vms_service = connection.system_service().vms_service()
    name_of_VM = 'name=' + str(vm_name)
    vm = vms_service.list(search=str(name_of_VM))[0]
    disk_attachments_service = vms_service.vm_service(
        vm.id).disk_attachments_service()

    # Map the requested disk format; only RAW and COW exist in oVirt.
    if params["format"].lower() == "raw":
        disk_format = types.DiskFormat.RAW
    elif params["format"].lower() == "cow":
        disk_format = types.DiskFormat.COW
    else:
        # BUGFIX: the original had an unreachable
        # `raise SaltCloudExecutionFailure` after this return (dead code,
        # removed) and a typo in the message.
        return ("Can't determine format of disk. Supported only RAW and COW")

    if params["interface"].lower() == "virtio":
        disk_interface = types.DiskInterface.VIRTIO
    else:
        # BUGFIX: message was copy-pasted from the format branch and talked
        # about RAW/COW; this branch is about the interface.
        return ("Can't determine interface of disk. Supported only VIRTIO")

    # NOTE: `== True` is kept deliberately — truthy non-True values (e.g.
    # "yes") fall through to disk creation exactly as before.
    if "existing_disk" in params and params["existing_disk"] == True:
        # Attach an already-existing disk by id.
        disk_attachments_service.add(
            types.DiskAttachment(
                disk=types.Disk(id=params["id"], ),
                active=params["active"],
                interface=types.DiskInterface(disk_interface),
                bootable=params["bootable"],
            ))
    else:
        # Create a brand-new disk and attach it. (The original duplicated
        # this identical body in two branches; merged into one.)
        disk_attachment = disk_attachments_service.add(
            types.DiskAttachment(
                disk=types.Disk(
                    name=params["name"],
                    description=params["description"]
                    if "description" in params else "Not provided",
                    format=disk_format,
                    # provisioned_size is given in GiB; convert to bytes.
                    provisioned_size=int(params["provisioned_size"]) * 2**30,
                    storage_domains=[
                        types.StorageDomain(name=params["storage_domains"], ),
                    ],
                ),
                interface=disk_interface,
                bootable=params["bootable"],
                active=params["active"],
            ),
        )
def main():
    """Ansible module entry point: manage oVirt templates.

    Supported states: present (create/update), absent (remove),
    exported (export to an export domain), imported (import from an
    export domain or a Glance image provider).
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'exported', 'imported'],
            default='present',
        ),
        name=dict(default=None, required=True),
        vm=dict(default=None),
        description=dict(default=None),
        cluster=dict(default=None),
        cpu_profile=dict(default=None),
        disks=dict(default=[], type='list'),
        clone_permissions=dict(type='bool'),
        export_domain=dict(default=None),
        storage_domain=dict(default=None),
        exclusive=dict(type='bool'),
        image_provider=dict(default=None),
        image_disk=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # BUGFIX: initialize before the try block. Previously, if
    # create_connection() (or the auth pop) raised, the finally clause
    # referenced an unbound `connection`/`auth` and the resulting NameError
    # masked the real failure.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        templates_service = connection.system_service().templates_service()
        templates_module = TemplatesModule(
            connection=connection,
            module=module,
            service=templates_service,
        )
        state = module.params['state']
        if state == 'present':
            ret = templates_module.create(
                result_state=otypes.TemplateStatus.OK,
                clone_permissions=module.params['clone_permissions'],
            )
        elif state == 'absent':
            ret = templates_module.remove()
        elif state == 'exported':
            template = templates_module.search_entity()
            export_service = templates_module._get_export_domain_service()
            export_template = search_by_attributes(
                export_service.templates_service(), id=template.id)
            ret = templates_module.action(
                entity=template,
                action='export',
                # Only export when it is not already in the export domain.
                action_condition=lambda t: export_template is None,
                wait_condition=lambda t: t is not None,
                post_action=templates_module.post_export_action,
                storage_domain=otypes.StorageDomain(
                    id=export_service.get().id),
                exclusive=module.params['exclusive'],
            )
        elif state == 'imported':
            template = templates_module.search_entity()
            if template:
                # Template already exists; treat as 'present'.
                ret = templates_module.create(
                    result_state=otypes.TemplateStatus.OK,
                )
            else:
                kwargs = {}
                if module.params['image_provider']:
                    kwargs.update(
                        disk=otypes.Disk(name=module.params['image_disk']),
                        template=otypes.Template(name=module.params['name'], ),
                        import_as_template=True,
                    )
                if module.params['image_disk']:
                    # We need to refresh storage domain to get list of images:
                    templates_module._get_export_domain_service(
                    ).images_service().list()
                    glance_service = connection.system_service(
                    ).openstack_image_providers_service()
                    image_provider = search_by_name(
                        glance_service, module.params['image_provider'])
                    images_service = glance_service.service(
                        image_provider.id).images_service()
                else:
                    images_service = templates_module._get_export_domain_service(
                    ).templates_service()
                template_name = module.params['image_disk'] or module.params[
                    'name']
                entity = search_by_name(images_service, template_name)
                if entity is None:
                    raise Exception("Image/template '%s' was not found." %
                                    template_name)
                images_service.service(entity.id).import_(
                    storage_domain=otypes.StorageDomain(
                        name=module.params['storage_domain'])
                    if module.params['storage_domain'] else None,
                    cluster=otypes.Cluster(name=module.params['cluster'])
                    if module.params['cluster'] else None,
                    **kwargs)
                template = wait_for_import(module, templates_service)
                ret = {
                    'changed': True,
                    'id': template.id,
                    'template': get_dict_of_struct(template),
                }
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close what was actually opened.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
def create_transfer(connection, disk, host):
    """
    Create image transfer and wait until the transfer is ready.

    Returns a transfer object.
    """
    system_service = connection.system_service()
    transfers_service = system_service.image_transfers_service()

    # Ask for RAW-format transfer only when the engine supports the
    # `format` attribute (checked by the module-level helper).
    extra = {}
    if transfer_supports_format():
        extra["format"] = types.DiskFormat.RAW

    transfer = transfers_service.add(
        types.ImageTransfer(
            disk=types.Disk(id=disk.id),
            host=host,
            inactivity_timeout=3600,
            **extra,
        ))

    # At this point the transfer owns the disk and will delete the disk if the
    # transfer is canceled, or if finalizing the transfer fails.

    debug("transfer.id = %r" % transfer.id)

    # Get a reference to the created transfer service.
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    # Wait until transfer's phase change from INITIALIZING to TRANSFERRING. On
    # errors transfer's phase can change to PAUSED_SYSTEM or FINISHED_FAILURE.
    # If the transfer was paused, we need to cancel it to remove the disk,
    # otherwise the system will remove the disk and transfer shortly after.
    # NOTE: `timeout` here is a module-level value defined elsewhere in the
    # file, not a parameter of this function.
    endt = time.time() + timeout
    while True:
        time.sleep(1)
        try:
            transfer = transfer_service.get()
        except sdk.NotFoundError:
            # The system has removed the disk and the transfer.
            raise RuntimeError("transfer %s was removed" % transfer.id)

        if transfer.phase == types.ImageTransferPhase.FINISHED_FAILURE:
            # The system will remove the disk and the transfer soon.
            raise RuntimeError("transfer %s has failed" % transfer.id)

        if transfer.phase == types.ImageTransferPhase.PAUSED_SYSTEM:
            transfer_service.cancel()
            raise RuntimeError("transfer %s was paused by system" % transfer.id)

        if transfer.phase == types.ImageTransferPhase.TRANSFERRING:
            break

        # Any phase other than INITIALIZING at this point is unexpected;
        # cancel so the engine cleans up the disk.
        if transfer.phase != types.ImageTransferPhase.INITIALIZING:
            transfer_service.cancel()
            raise RuntimeError("unexpected transfer %s phase %s" %
                               (transfer.id, transfer.phase))

        if time.time() > endt:
            transfer_service.cancel()
            raise RuntimeError("timed out waiting for transfer %s" %
                               transfer.id)

    return transfer
# disks created on block storage domains, so that all the required # space is allocated upfront, otherwise the upload will eventually # fail. # # 3. The disk initial size must be bigger or the same as the size of the data # you will upload. print("Creating disk...") disk_format = types.DiskFormat.RAW disks_service = connection.system_service().disks_service() disk = disks_service.add( types.Disk(name=args.diskname, openstack_volume_type=types.OpenStackVolumeType( name=args.openstackvolumetype), description=args.description, format=disk_format, provisioned_size=image_info["virtual-size"], storage_domains=[types.StorageDomain(name=args.storagedomain)])) # Wait till the disk is up, as the transfer can't start if the # disk is locked: disk_service = disks_service.disk_service(disk.id) while True: time.sleep(5) disk = disk_service.get() if disk.status == types.DiskStatus.OK: break try: CURSOR_UP_ONE = '\x1b[1A' ERASE_LINE = '\x1b[2K'
def create_and_run_vm():
    """Create a VM from a specific template version and start it.

    Looks up ``template_name``/``template_version`` (module-level
    settings), clones the template's first disk onto the storage domain
    named ``storage_domain_name`` in COW format, waits for the VM to
    reach DOWN (creation finished), starts it, and waits for UP.
    """
    connection = get_connection()
    system_service = connection.system_service()

    storage_domains_service = system_service.storage_domains_service()
    storage_domain = storage_domains_service.list(search='name=' +
                                                  storage_domain_name)[0]

    templates_service = system_service.templates_service()
    templates = templates_service.list(search='name=' + template_name)
    if not templates:
        print("could not find the required template: {}".format(template_name))
        quit()

    # Pick the template whose version number matches exactly.
    template_id = None
    for template in templates:
        if template.version.version_number == template_version:
            template_id = template.id
    if template_id is None:
        # BUGFIX: previously a missing version fell through with
        # template_id=None and failed later inside the SDK with a
        # confusing error; fail early with a clear message instead.
        print("could not find version {} of template: {}".format(
            template_version, template_name))
        quit()

    template_service = templates_service.template_service(template_id)
    disk_attachments = connection.follow_link(
        template_service.get().disk_attachments)
    disk = disk_attachments[0].disk

    vms_service = system_service.vms_service()
    vm = vms_service.add(
        types.Vm(
            name=VM_NAME,
            cluster=types.Cluster(name='Default'),
            template=types.Template(id=template_id),
            disk_attachments=[
                types.DiskAttachment(disk=types.Disk(
                    id=disk.id,
                    format=types.DiskFormat.COW,
                    storage_domains=[
                        types.StorageDomain(id=storage_domain.id, ),
                    ],
                ), ),
            ],
        ))
    vm_service = vms_service.vm_service(vm.id)

    # Wait for the clone to finish (VM settles in DOWN state).
    while True:
        time.sleep(10)
        vm = vm_service.get()
        if vm.status == types.VmStatus.DOWN:
            break

    vm_service.start()
    # Wait until the VM is actually running.
    while True:
        time.sleep(10)
        vm = vm_service.get()
        if vm.status == types.VmStatus.UP:
            break

    connection.close()
system_service = connection.system_service()

print("Creating disk...")
# qcow2 images become COW disks; everything else is uploaded as RAW.
if image_info["format"] == "qcow2":
    disk_format = types.DiskFormat.COW
else:
    disk_format = types.DiskFormat.RAW
disks_service = connection.system_service().disks_service()
disk = disks_service.add(
    disk=types.Disk(
        name=os.path.basename(args.filepath),
        content_type=content_type,
        description='Trilio Vault',
        format=disk_format,
        initial_size=image_size,
        provisioned_size=image_info["virtual-size"],
        # COW disks are sparse; RAW disks are preallocated here.
        sparse=disk_format == types.DiskFormat.COW,
        storage_domains=[
            types.StorageDomain(
                name=args.sdname
            )
        ]
    )
)

# Wait till the disk is up, as the transfer can't start if the
# disk is locked:
disk_service = disks_service.disk_service(disk.id)
while True:
    time.sleep(5)
    disk = disk_service.get()
    if disk.status == types.DiskStatus.OK:
        break
    # Tail of a format-selection `if` whose condition is outside this view.
    disk_format = types.DiskFormat.RAW
else:
    disk_format = types.DiskFormat.COW

progress("Connecting...")
connection = common.create_connection(args)
with closing(connection):
    disks_service = connection.system_service().disks_service()
    # Create all disks up front, then poll them as a batch.
    waiting = set()
    for i in range(args.count):
        disk = disks_service.add(disk=types.Disk(
            name="disk-%s" % i,
            content_type=types.DiskContentType.DATA,
            description='Created by add_disks.py',
            format=disk_format,
            provisioned_size=args.size,
            sparse=args.sparse,
            storage_domains=[types.StorageDomain(name=args.sd_name)]))
        progress("Created disk %s id=%s" % (i, disk.id))
        waiting.add(disk.id)

    progress("Waiting until disks are ready...")
    while waiting:
        time.sleep(1)
        # Iterate over a copy so we can remove finished disks in-loop.
        for disk_id in list(waiting):
            disk = disks_service.disk_service(disk_id).get()
            if disk.status == types.DiskStatus.OK:
                progress("Disk %s is ready" % disk_id)
                waiting.remove(disk_id)
#
# 2. The disk initial size must be bigger or the same as the size of the data
# you will upload.
# Determine the provisioned size from the qcow image itself.
# (Removed dead commented-out qemu-img/subprocess code that the helper
# below replaced.)
utils = virtbkp_utils.virtbkp_utils()
provisioned_size = utils.get_qcow_size(qcowfile)
disks_service = connection.system_service().disks_service()
disk = disks_service.add(
    disk=types.Disk(
        name=qcowfile,
        description=qcowfile,
        format=types.DiskFormat.COW,
        provisioned_size=provisioned_size,
        storage_domains=[
            types.StorageDomain(
                name=storagedomain
            )
        ]
    )
)

# Wait till the disk is up, as the transfer can't start if the
# disk is locked:
disk_service = disks_service.disk_service(disk.id)
while True:
    # BUGFIX: the sleep was commented out, turning this into a tight busy
    # loop that hammered the engine API while the disk was locked.
    time.sleep(5)
    disk = disk_service.get()
    if disk.status == types.DiskStatus.OK:
        break
def add_disk(self, name, size, pool=None, thin=True, template=None, shareable=False, existing=None):
    """
    Attach a new COW disk of ``size`` GB to the VM called ``name``.

    :param name: VM name to attach the disk to
    :param size: disk size in GB (converted to bytes below)
    :param pool: storage domain name the disk is created on
    :param thin: unused in this implementation
    :param template: unused in this implementation
    :param shareable: unused in this implementation
    :param existing: unused in this implementation
    :return: ``{'result': 'failure', 'reason': ...}`` on error or timeout;
             ``None`` (implicitly) on success
    """
    # GB -> bytes.
    size *= 2**30
    system_service = self.conn.system_service()
    sds_service = system_service.storage_domains_service()
    # Validate the storage domain exists before touching the VM.
    poolcheck = sds_service.list(search='name=%s' % pool)
    if not poolcheck:
        return {'result': 'failure', 'reason': "Pool %s not found" % pool}
    vmsearch = self.vms_service.list(search='name=%s' % name)
    if not vmsearch:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    vm = self.vms_service.vm_service(vmsearch[0].id)
    disk_attachments_service = vm.disk_attachments_service()
    # Name the new disk after the next free index, e.g. "<vm>_Disk2".
    currentdisk = len(disk_attachments_service.list())
    diskindex = currentdisk + 1
    diskname = '%s_Disk%s' % (name, diskindex)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(disk=types.Disk(
            name=diskname,
            format=types.DiskFormat.COW,
            provisioned_size=size,
            storage_domains=[types.StorageDomain(name=pool)]),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True))
    disks_service = self.conn.system_service().disks_service()
    disk_service = disks_service.disk_service(disk_attachment.disk.id)
    # Poll until the disk unlocks; gives up after ~45s (9 polls of 5s).
    timeout = 0
    while True:
        disk = disk_service.get()
        if disk.status == types.DiskStatus.OK:
            break
        else:
            timeout += 5
            sleep(5)
            common.pprint("Waiting for disk %s to be ready" % diskname, color='green')
            if timeout > 40:
                return {
                    'result': 'failure',
                    'reason': 'timeout waiting for disk %s to be ready' % diskname
                }
def snapshot_merge(api):
    """Integration test: create two disk snapshots on VM0, remove the
    middle one, and verify the engine live-merges back to two snapshots.
    """
    engine = api.system_service()
    vm0_snapshots_service = test_utils.get_vm_snapshots_service(engine, VM0_NAME)
    disk = engine.disks_service().list(search='name={}'.format(DISK0_NAME))[0]

    # First throwaway snapshot, disk-only (no memory state).
    dead_snap1_params = types.Snapshot(
        description='dead_snap1',
        persist_memorystate=False,
        disk_attachments=[
            types.DiskAttachment(
                disk=types.Disk(
                    id=disk.id
                )
            )
        ]
    )
    # Correlation id lets us wait on exactly the jobs this add() spawned.
    correlation_id = uuid.uuid4()
    vm0_snapshots_service.add(
        dead_snap1_params,
        query={'correlation_id': correlation_id}
    )
    testlib.assert_true_within_short(
        lambda: test_utils.all_jobs_finished(engine, correlation_id)
    )
    testlib.assert_true_within_short(
        lambda: vm0_snapshots_service.list()[-1].snapshot_status == types.SnapshotStatus.OK
    )

    # Second throwaway snapshot, same shape as the first.
    dead_snap2_params = types.Snapshot(
        description='dead_snap2',
        persist_memorystate=False,
        disk_attachments=[
            types.DiskAttachment(
                disk=types.Disk(
                    id=disk.id
                )
            )
        ]
    )
    correlation_id_snap2 = uuid.uuid4()
    vm0_snapshots_service.add(
        dead_snap2_params,
        query={'correlation_id': correlation_id_snap2}
    )
    testlib.assert_true_within_short(
        lambda: test_utils.all_jobs_finished(engine, correlation_id_snap2)
    )
    testlib.assert_true_within_short(
        lambda: vm0_snapshots_service.list()[-1].snapshot_status == types.SnapshotStatus.OK
    )

    # Remove the next-to-last snapshot, triggering a merge; afterwards only
    # two snapshots should remain and the last one must be OK.
    snapshot = vm0_snapshots_service.list()[-2]
    vm0_snapshots_service.snapshot_service(snapshot.id).remove()
    testlib.assert_true_within_short(
        lambda: (len(vm0_snapshots_service.list()) == 2) and
                (vm0_snapshots_service.list()[-1].snapshot_status ==
                 types.SnapshotStatus.OK),
    )
def create_snap(self, vmid, snapname, my_disk):
    """Create a disk-only snapshot of VM ``vmid`` covering ``my_disk``
    and block until the snapshot leaves the 'locked' state.
    """
    vms = self.connection.service("vms")
    snapshots = vms.vm_service(vmid).snapshots_service()
    # Snapshot only the given disk; memory state is not persisted.
    wanted_disk = types.Disk(id=my_disk)
    snapshots.add(
        types.Snapshot(
            description=snapname,
            persist_memorystate=False,
            disk_attachments=[types.DiskAttachment(disk=wanted_disk)],
        )
    )
    snapid = self.get_snap_id(vmid)
    state = self.get_snap_status(vmid, snapid)
    printf.INFO("Trying to create snapshot of VM: " + vmid)
    # Poll every 10s until the engine unlocks the snapshot.
    while str(state) == "locked":
        time.sleep(10)
        printf.INFO("Waiting until snapshot creation ends")
        state = self.get_snap_status(vmid, snapid)
    printf.OK("Snapshot created")
snap = snap_service.get()
logging.info('The snapshot is now complete.')

# Retrieve the descriptions of the disks of the snapshot:
snap_disks_service = snap_service.disks_service()
snap_disks = snap_disks_service.list()

# Attach all the disks of the snapshot to the agent virtual machine, and
# save the resulting disk attachments in a list so that we can later
# detach them easily:
attachments_service = agent_vm_service.disk_attachments_service()
attachments = []
for snap_disk in snap_disks:
    # Attaching a disk with a snapshot reference attaches that snapshot's
    # version of the disk, read from the chain at that point.
    attachment = attachments_service.add(attachment=types.DiskAttachment(
        disk=types.Disk(
            id=snap_disk.id,
            snapshot=types.Snapshot(id=snap.id, ),
        ),
        active=True,
        bootable=False,
        interface=types.DiskInterface.VIRTIO,
    ),
    )
    attachments.append(attachment)
    logging.info(
        'Attached disk \'%s\' to the agent virtual machine.',
        attachment.disk.id,
    )

# Now the disks are attached to the virtual agent virtual machine, we
# can then ask that virtual machine to perform the backup. Doing that
# requires a mechanism to talk to the backup software that runs inside the
# agent virtual machine. That is outside of the scope of the SDK. But if
    # Tail of a Connection(...) call whose start is outside this view.
    debug=True,
    log=logging.getLogger(),
)

# Get the reference to the root service:
system_service = connection.system_service()

print("Creating disk...")
# initial_size must cover the on-disk file size being uploaded.
image_size = os.path.getsize(args.filename)
disks_service = connection.system_service().disks_service()
disk = disks_service.add(
    disk=types.Disk(name=os.path.basename(args.filename),
                    content_type=image_info["content_type"],
                    description='Uploaded disk',
                    format=new_disk_format,
                    initial_size=image_size,
                    provisioned_size=image_info["virtual-size"],
                    # COW disks are sparse; RAW is preallocated here.
                    sparse=new_disk_format == types.DiskFormat.COW,
                    storage_domains=[types.StorageDomain(name=args.sd_name)]))

# Wait till the disk is up, as the transfer can't start if the
# disk is locked:
disk_service = disks_service.disk_service(disk.id)
while True:
    time.sleep(5)
    disk = disk_service.get()
    if disk.status == types.DiskStatus.OK:
        break

print("Creating transfer session...")
# Locate the service that manages the disk attachments of the virtual # machine: disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() # Use the "add" method of the disk attachments service to add the LUN disk. disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( name='myiscsidisk', lun_storage=types.HostStorage( type=types.StorageType.ISCSI, logical_units=[ types.LogicalUnit( address='192.168.1.1', port=3260, target='iqn.2017-05.org.ovirt:storage', id='36001405d6c6cbba754c4b568d843ff6a', username='******', password='******', ) ], ), ), interface=types.DiskInterface.VIRTIO, bootable=False, active=True, ), ) # Close the connection to the server: connection.close()
def build_entity(self):
    """Build and return the otypes.Disk entity described by the Ansible
    module parameters.

    When no explicit size is given but an upload image path is, the size
    is taken from `qemu-img info`. For qcow2 uploads, `initial_size` is
    computed with `qemu-img measure` so block storage preallocates enough
    space for the transfer.
    """
    hosts_service = self._connection.system_service().hosts_service()
    logical_unit = self._module.params.get('logical_unit')
    size = convert_to_bytes(self._module.params.get('size'))
    # Derive the size from the image to be uploaded when not provided.
    if not size and self._module.params.get('upload_image_path'):
        out = subprocess.check_output([
            "qemu-img", "info", "--output", "json",
            self._module.params.get('upload_image_path')
        ])
        image_info = json.loads(out)
        size = image_info["virtual-size"]
    # Every optional attribute is passed as None when the corresponding
    # module parameter is unset, so the engine keeps its defaults.
    disk = otypes.Disk(
        id=self._module.params.get('id'),
        name=self._module.params.get('name'),
        description=self._module.params.get('description'),
        format=otypes.DiskFormat(self._module.params.get('format'))
        if self._module.params.get('format') else None,
        content_type=otypes.DiskContentType(
            self._module.params.get('content_type'))
        if self._module.params.get('content_type') else None,
        # Default sparseness: sparse unless the format is raw.
        sparse=self._module.params.get('sparse')
        if self._module.params.get('sparse') is not None else
        self._module.params.get('format') != 'raw',
        openstack_volume_type=otypes.OpenStackVolumeType(
            name=self.param('openstack_volume_type'))
        if self.param('openstack_volume_type') else None,
        provisioned_size=size,
        storage_domains=[
            otypes.StorageDomain(
                name=self._module.params.get('storage_domain'),
            ),
        ],
        quota=otypes.Quota(id=self._module.params.get('quota_id'))
        if self.param('quota_id') else None,
        shareable=self._module.params.get('shareable'),
        sgio=otypes.ScsiGenericIO(self.param('scsi_passthrough'))
        if self.param('scsi_passthrough') else None,
        propagate_errors=self.param('propagate_errors'),
        backup=otypes.DiskBackup(self.param('backup'))
        if self.param('backup') else None,
        wipe_after_delete=self.param('wipe_after_delete'),
        # Direct-LUN storage is only built when `logical_unit` was given.
        lun_storage=otypes.HostStorage(
            host=otypes.Host(id=get_id_by_name(
                hosts_service, self._module.params.get('host')))
            if self.param('host') else None,
            type=otypes.StorageType(
                logical_unit.get('storage_type', 'iscsi')),
            logical_units=[
                otypes.LogicalUnit(
                    address=logical_unit.get('address'),
                    port=logical_unit.get('port', 3260),
                    target=logical_unit.get('target'),
                    id=logical_unit.get('id'),
                    username=logical_unit.get('username'),
                    password=logical_unit.get('password'),
                )
            ],
        ) if logical_unit else None,
    )
    # Compute the required allocation for the upload when the SDK version
    # exposes `initial_size` on Disk.
    if hasattr(
            disk, 'initial_size') and self._module.params['upload_image_path']:
        out = subprocess.check_output([
            'qemu-img', 'measure', '-O',
            'qcow2' if self._module.params.get('format') == 'cow' else 'raw',
            '--output', 'json', self._module.params['upload_image_path']
        ])
        measure = json.loads(out)
        disk.initial_size = measure["required"]

    return disk
def deploy(self, vm_name, cluster, timeout=900, power_on=True, **kwargs):
    """
    Deploy a VM using this template

    Args:
        vm_name -- name of VM to create
        cluster -- cluster name to which VM should be deployed
        timeout (optional) -- default 900
        power_on (optional) -- default True
        placement_policy_host (optional)
        placement_policy_affinity (optional)
        cpu (optional) -- number of cpu cores
        sockets (optional) -- number of cpu sockets
        ram (optional) -- memory in GB
        storage_domain (optional) -- storage domain name to which VM should be deployed

    Returns:
        wrapanapi.systems.rhevm.RHEVMVirtualMachine
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s', self.name,
                      vm_name)
    vm_kwargs = {
        'name': vm_name,
        'cluster': self.system.get_cluster(cluster),
        'template': self.raw,
    }
    clone = None
    domain_name = kwargs.get('storage_domain')
    if domain_name:
        # need to specify storage domain, if its different than the template's disks location
        # then additional options required. disk allocation mode in UI required to be clone
        clone = True
        target_storage_domain = self.system.get_storage_domain(domain_name)
        disk_attachments = []
        for template_attachment in self.api.disk_attachments_service(
        ).list():
            # NOTE(review): this passes the attachment's own id as the disk
            # id — presumably attachment ids equal disk ids here; confirm
            # whether this should be `template_attachment.disk.id`.
            new_attachment = types.DiskAttachment(
                disk=types.Disk(id=template_attachment.id,
                                format=types.DiskFormat.COW,
                                storage_domains=[target_storage_domain]))
            disk_attachments.append(new_attachment)

        vm_kwargs['disk_attachments'] = disk_attachments

    # Placement requires two args
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        host = types.Host(name=kwargs['placement_policy_host'])
        policy = types.VmPlacementPolicy(
            hosts=[host],
            affinity=kwargs['placement_policy_affinity'])
        vm_kwargs['placement_policy'] = policy

    # if cpu is passed, also default a sockets
    # unless its passed
    cpu = kwargs.get('cpu', None)  # don't set default if its not passed
    if cpu:
        vm_kwargs['cpu'] = types.Cpu(topology=types.CpuTopology(
            cores=cpu, sockets=kwargs.get('sockets', 1)))

    if 'ram' in kwargs:
        # NOTE(review): docstring says GB but the value is passed straight
        # through as bytes — callers appear to pass bytes; confirm.
        vm_kwargs['memory'] = int(kwargs['ram'])  # in Bytes

    vms_service = self.system.api.system_service().vms_service()
    vms_service.add(types.Vm(**vm_kwargs), clone=clone)
    vm = self.system.get_vm(vm_name)
    vm.wait_for_state(VmState.STOPPED, timeout=timeout)
    if power_on:
        vm.start()
    return vm
    # Tail of a Connection(...) call whose start is outside this view.
)

# Get the reference to the root service:
system_service = connection.system_service()

print("Creating disk...")
disks_service = connection.system_service().disks_service()
disk = disks_service.add(
    disk=types.Disk(
        name=disk_info["name"],
        content_type=disk_info["content_type"],
        description='Uploaded disk',
        format=disk_info["format"],
        initial_size=disk_info["initial_size"],
        provisioned_size=disk_info["provisioned_size"],
        sparse=args.disk_sparse,
        storage_domains=[
            types.StorageDomain(
                name=args.sd_name
            )
        ]
    )
)

# Wait till the disk is up, as the transfer can't start if the
# disk is locked:
disk_service = disks_service.disk_service(disk.id)
while True:
    time.sleep(1)
    disk = disk_service.get()
    # (Body of this `if` continues beyond the visible chunk.)
    if disk.status == types.DiskStatus.OK:
def add_disk(
    self,
    vm,
    size,
    disk_format=RHV_DISK_FORMAT_RAW,
    disk_interface=RHV_DISK_INTERFACE_VIRTIO_SCSI,
    sparse=None,
    pass_discard=None,
    storage_domain_id=None,
    timeout=120,
):
    """
    Attaches disk to VM

    Args:
        vm (types.Vm): Vm instance
        size (int) : size of disk in GB
        disk_format (str): underlying storage format of disks (default: "RAW")
        disk_interface (str): underlying storage interface of disks
          communication with controller (default: 'VIRTIO_SCSI')
        sparse (bool): disk allocation policy. True for sparse,
          false for preallocated (default: None)
        pass_discard (bool): True if the virtual machine passes discard
          commands to the storage, False otherwise (default: None)
        storage_domain_id (str): A unique identifier for the storage domain
        timeout (int): The timeout in seconds for disk status OK (default: 120)
    """
    logger.info(f"Adding disk to {vm.name}")
    disk_size_bytes = int(size) * GB
    storage_domain_id = (storage_domain_id
                         or config.ENV_DATA["ovirt_storage_domain_id"])
    disk_attachments_service = self.get_disk_attachments_service(vm.id)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                # Resolve the string names to SDK enum members.
                format=getattr(types.DiskFormat, disk_format),
                provisioned_size=disk_size_bytes,
                sparse=sparse,
                storage_domains=[
                    types.StorageDomain(id=storage_domain_id, ),
                ],
            ),
            interface=getattr(types.DiskInterface, disk_interface),
            bootable=False,
            active=True,
            pass_discard=pass_discard,
        ),
    )
    # Wait for the disk to reach OK:
    disk_id = disk_attachment.disk.id
    disk_service = self.get_disk_service(disk_id)
    try:
        for sample in TimeoutSampler(timeout, 3, disk_service.get):
            logger.info(f"Waiting for disk status to be OK. "
                        f"Current disk status: {sample.status}")
            if sample.status == types.DiskStatus.OK:
                logger.info(f"Disk {sample.name} reached OK status")
                break
    except TimeoutExpiredError:
        # BUGFIX: the original logged `sample.name` here, but `sample` is
        # unbound if the sampler raises before its first yield; use the
        # attachment's disk id, which is always available.
        logger.error(
            f"Disk {disk_id} failed to get attached to {vm.name}")
        raise
    logger.info(f"{size}GB disk added successfully to {vm.name}")
# 3. The disk initial size must be bigger or the same as the size of the data # you will upload. print("Creating disk...") if image_info["format"] == "qcow2": disk_format = types.DiskFormat.COW else: disk_format = types.DiskFormat.RAW disks_service = connection.system_service().disks_service() disk = disks_service.add( disk=types.Disk(name=os.path.basename(image_path), content_type=content_type, description='Uploaded disk', format=disk_format, initial_size=image_size, provisioned_size=image_info["virtual-size"], sparse=disk_format == types.DiskFormat.COW, storage_domains=[types.StorageDomain(name='mydata')])) # Wait till the disk is up, as the transfer can't start if the # disk is locked: disk_service = disks_service.disk_service(disk.id) while True: time.sleep(5) disk = disk_service.get() if disk.status == types.DiskStatus.OK: break print("Creating transfer session...")
def open(readonly):
    """
    Plugin 'open' callback: connect to the RHV engine, create the target
    disk, start an image transfer and return a handle dict used by the
    other callbacks.

    The `readonly` argument is accepted but not used in this function.
    Module-level names (`params`, `timeout`, `debug`, `find_host`,
    `UnixHTTPConnection`, `HTTPSConnection`) are defined elsewhere in
    this file.
    """
    # Parse out the username from the output_conn URL.
    parsed = urlparse(params['output_conn'])
    username = parsed.username or "admin@internal"

    # Read the password from file.
    with builtins.open(params['output_password'], 'r') as fp:
        password = fp.read()
    password = password.rstrip()

    # Connect to the server.
    connection = sdk.Connection(
        url=params['output_conn'],
        username=username,
        password=password,
        ca_file=params['rhv_cafile'],
        log=logging.getLogger(),
        insecure=params['insecure'],
    )

    system_service = connection.system_service()

    # Create the disk.
    disks_service = system_service.disks_service()
    if params['disk_format'] == "raw":
        disk_format = types.DiskFormat.RAW
    else:
        disk_format = types.DiskFormat.COW
    disk = disks_service.add(disk=types.Disk(
        name=params['disk_name'],
        description="Uploaded by virt-v2v",
        format=disk_format,
        initial_size=params['disk_size'],
        provisioned_size=params['disk_size'],
        # XXX Ignores params['output_sparse'].
        # Handling this properly will be complex, see:
        # https://www.redhat.com/archives/libguestfs/2018-March/msg00177.html
        sparse=True,
        storage_domains=[types.StorageDomain(name=params['output_storage'], )],
    ))

    # Wait till the disk is up, as the transfer can't start if the
    # disk is locked:
    disk_service = disks_service.disk_service(disk.id)
    debug("disk.id = %r" % disk.id)

    endt = time.time() + timeout
    while True:
        time.sleep(5)
        disk = disk_service.get()
        if disk.status == types.DiskStatus.OK:
            break
        if time.time() > endt:
            raise RuntimeError("timed out waiting for disk to become unlocked")

    # Get a reference to the transfer service.
    transfers_service = system_service.image_transfers_service()

    # Create a new image transfer, using the local host if possible.
    host = find_host(connection) if params['rhv_direct'] else None
    transfer = transfers_service.add(
        types.ImageTransfer(
            disk=types.Disk(id=disk.id),
            host=host,
            inactivity_timeout=3600,
        ))
    debug("transfer.id = %r" % transfer.id)

    # Get a reference to the created transfer service.
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    # After adding a new transfer for the disk, the transfer's status
    # will be INITIALIZING.  Wait until the init phase is over.  The
    # actual transfer can start when its status is "Transferring".
    endt = time.time() + timeout
    while True:
        time.sleep(5)
        transfer = transfer_service.get()
        if transfer.phase != types.ImageTransferPhase.INITIALIZING:
            break
        if time.time() > endt:
            raise RuntimeError("timed out waiting for transfer status " +
                               "!= INITIALIZING")

    # Now we have permission to start the transfer.
    if params['rhv_direct']:
        if transfer.transfer_url is None:
            raise RuntimeError("direct upload to host not supported, " +
                               "requires ovirt-engine >= 4.2 and only works " +
                               "when virt-v2v is run within the oVirt/RHV " +
                               "environment, eg. on an oVirt node.")
        destination_url = urlparse(transfer.transfer_url)
    else:
        destination_url = urlparse(transfer.proxy_url)

    context = ssl.create_default_context()
    context.load_verify_locations(cafile=params['rhv_cafile'])

    http = HTTPSConnection(
        destination_url.hostname,
        destination_url.port,
        context=context
    )

    # The first request is to fetch the features of the server.

    # Authentication was needed only for GET and PUT requests when
    # communicating with old imageio-proxy.
    needs_auth = not params['rhv_direct']

    can_flush = False
    can_trim = False
    can_zero = False
    unix_socket = None

    http.request("OPTIONS", destination_url.path)
    r = http.getresponse()
    data = r.read()

    if r.status == 200:
        # New imageio never needs authentication.
        needs_auth = False

        j = json.loads(data)
        can_flush = "flush" in j['features']
        can_trim = "trim" in j['features']
        can_zero = "zero" in j['features']
        unix_socket = j.get('unix_socket')

    # Old imageio servers returned either 405 Method Not Allowed or
    # 204 No Content (with an empty body).  If we see that we leave
    # all the features as False and they will be emulated.
    elif r.status == 405 or r.status == 204:
        pass

    else:
        raise RuntimeError("could not use OPTIONS request: %d: %s" %
                           (r.status, r.reason))

    debug("imageio features: flush=%r trim=%r zero=%r unix_socket=%r" %
          (can_flush, can_trim, can_zero, unix_socket))

    # If we are connected to imageio on the local host and the
    # transfer features a unix_socket then we can reconnect to that.
    if host is not None and unix_socket is not None:
        try:
            http = UnixHTTPConnection(unix_socket)
        except Exception as e:
            # Very unlikely failure, but we can recover by using the https
            # connection.
            debug("cannot create unix socket connection, using https: %s" % e)
        else:
            debug("optimizing connection using unix socket %r" % unix_socket)

    # Save everything we need to make requests in the handle.
    return {
        'can_flush': can_flush,
        'can_trim': can_trim,
        'can_zero': can_zero,
        'connection': connection,
        'disk': disk,
        'disk_service': disk_service,
        'failed': False,
        'highestwrite': 0,
        'http': http,
        'needs_auth': needs_auth,
        'path': destination_url.path,
        'transfer': transfer,
        'transfer_service': transfer_service,
    }
# Locate the service that manages the disk attachments of the virtual # machine: disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() # Use the "add" method of the disk attachments service to add the disk. # Note that the size of the disk, the `provisioned_size` attribute, is # specified in bytes, so to create a disk of 10 GiB the value should # be 10 * 2^30. disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( name='mydisk', description='my disk', format=types.DiskFormat.COW, provisioned_size=10 * 2**30, storage_domains=[ types.StorageDomain(name='bs-scsi-012', ), ], ), interface=types.DiskInterface.VIRTIO, bootable=False, active=True, ), ) # Find the service that manages the disk attachment that was added in the # previous step: disk_attachment_service = disk_attachments_service.attachment_service( disk_attachment.id) # Wait till the disk is OK:
def main():
    """
    Entry point of the ovirt_disk Ansible module: create/remove/attach/
    detach/export/import a disk according to the `state` parameter and
    exit via module.exit_json / module.fail_json.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
            default='present'
        ),
        id=dict(default=None),
        name=dict(default=None, aliases=['alias']),
        description=dict(default=None),
        vm_name=dict(default=None),
        vm_id=dict(default=None),
        size=dict(default=None),
        interface=dict(default=None, choices=['virtio', 'ide', 'virtio_scsi']),
        storage_domain=dict(default=None),
        storage_domains=dict(default=None, type='list', elements='str'),
        profile=dict(default=None),
        quota_id=dict(default=None),
        format=dict(default='cow', choices=['raw', 'cow']),
        content_type=dict(
            default='data',
            choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
        ),
        backup=dict(default=None, type='str', choices=['incremental']),
        sparse=dict(default=None, type='bool'),
        bootable=dict(default=None, type='bool'),
        shareable=dict(default=None, type='bool'),
        scsi_passthrough=dict(default=None, type='str', choices=['disabled', 'filtered', 'unfiltered']),
        uses_scsi_reservation=dict(default=None, type='bool'),
        pass_discard=dict(default=None, type='bool'),
        propagate_errors=dict(default=None, type='bool'),
        logical_unit=dict(default=None, type='dict'),
        download_image_path=dict(default=None),
        upload_image_path=dict(default=None, aliases=['image_path']),
        force=dict(default=False, type='bool'),
        sparsify=dict(default=None, type='bool'),
        openstack_volume_type=dict(default=None),
        image_provider=dict(default=None),
        host=dict(default=None),
        wipe_after_delete=dict(type='bool', default=None),
        activate=dict(default=None, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    lun = module.params.get('logical_unit')
    host = module.params['host']
    # Fail when host is specified without the LUN id. LUN id is needed to
    # identify an existing disk if already available in the environment.
    if (host and lun is None) or (host and lun.get("id") is None):
        module.fail_json(
            msg="Can not use parameter host ({0!s}) without "
            "specifying the logical_unit id".format(host)
        )

    check_sdk(module)
    check_params(module)

    try:
        disk = None
        state = module.params['state']
        auth = module.params.get('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks_module = DisksModule(
            connection=connection,
            module=module,
            service=disks_service,
        )

        force_create = False
        vm_service = get_vm_service(connection, module)
        if lun:
            disk = _search_by_lun(disks_service, lun.get('id'))
        else:
            disk = disks_module.search_entity(search_params=searchable_attributes(module))
            if vm_service and disk:
                # If the VM don't exist in VMs disks, but still it's found it means it was found
                # for template with same name as VM, so we should force create the VM disk.
                force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]

        ret = None
        # First take care of creating the VM, if needed:
        if state in ('present', 'detached', 'attached'):
            # Always activate disk when its being created
            if vm_service is not None and disk is None:
                module.params['activate'] = module.params['activate'] is None or module.params['activate']
            ret = disks_module.create(
                entity=disk if not force_create else None,
                result_state=otypes.DiskStatus.OK if lun is None else None,
                fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
                force_create=force_create,
                _wait=True if module.params['upload_image_path'] else module.params['wait'],
            )
            is_new_disk = ret['changed']
            ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
            # We need to pass ID to the module, so in case we want detach/attach disk
            # we have this ID specified to attach/detach method:
            module.params['id'] = ret['id']

            # Upload disk image in case it's new disk or force parameter is passed:
            if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
                if module.params['format'] == 'cow' and module.params['content_type'] == 'iso':
                    module.warn("To upload an ISO image 'format' parameter needs to be set to 'raw'.")
                uploaded = upload_disk_image(connection, module)
                ret['changed'] = ret['changed'] or uploaded

            # Download disk image in case the file doesn't exist or force parameter is passed:
            if (
                module.params['download_image_path']
                and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
            ):
                downloaded = download_disk_image(connection, module)
                ret['changed'] = ret['changed'] or downloaded

            # Disk sparsify, only if disk is of image type:
            if not module.check_mode:
                disk = disks_service.disk_service(module.params['id']).get()
                if disk.storage_type == otypes.DiskStorageType.IMAGE:
                    ret = disks_module.action(
                        action='sparsify',
                        action_condition=lambda d: module.params['sparsify'],
                        wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    )

        # Export disk as image to glance domain
        elif state == 'exported':
            disk = disks_module.search_entity()
            if disk is None:
                # BUGFIX: `%` binds tighter than `or`, so the original
                # expression `"..." % name or id` always used `name` (the
                # formatted string is never falsy). Parenthesize so `id`
                # is used when `name` is not given.
                module.fail_json(
                    msg="Can not export given disk '%s', it doesn't exist" %
                        (module.params.get('name') or module.params.get('id'))
                )
            if disk.storage_type == otypes.DiskStorageType.IMAGE:
                ret = disks_module.action(
                    action='export',
                    action_condition=lambda d: module.params['image_provider'],
                    wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
                    storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
                )
        elif state == 'imported':
            glance_service = connection.system_service().openstack_image_providers_service()
            image_provider = search_by_name(glance_service, module.params['image_provider'])
            images_service = glance_service.service(image_provider.id).images_service()
            entity_id = get_id_by_name(images_service, module.params['name'])
            images_service.service(entity_id).import_(
                storage_domain=otypes.StorageDomain(
                    name=module.params['storage_domain']
                ) if module.params['storage_domain'] else None,
                disk=otypes.Disk(
                    name=module.params['name']
                ),
                import_as_template=False,
            )
            # Wait for disk to appear in system:
            disk = disks_module.wait_for_import(
                condition=lambda t: t.status == otypes.DiskStatus.OK
            )
            ret = disks_module.create(result_state=otypes.DiskStatus.OK)
        elif state == 'absent':
            ret = disks_module.remove()

        # If VM was passed attach/detach disks to/from the VM:
        if vm_service:
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments_module = DiskAttachmentsModule(
                connection=connection,
                module=module,
                service=disk_attachments_service,
                changed=ret['changed'] if ret else False,
            )

            if state == 'present' or state == 'attached':
                ret = disk_attachments_module.create()
                if lun is None:
                    wait(
                        service=disk_attachments_service.service(ret['id']),
                        condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
                        wait=module.params['wait'],
                        timeout=module.params['timeout'],
                    )
            elif state == 'detached':
                ret = disk_attachments_module.remove()

        # When the host parameter is specified and the disk is not being
        # removed, refresh the information about the LUN.
        if state != 'absent' and host:
            hosts_service = connection.system_service().hosts_service()
            host_id = get_id_by_name(hosts_service, host)
            disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
# the template using a format different to the format used by the # original disks. attachments = connection.follow_link(vm.disk_attachments) disk_ids = [attachment.disk.id for attachment in attachments] # Send the request to create the template. Note that the way to specify # the original virtual machine, and the customizations, is to use the # 'vm' attribute of the 'Template' type. In the customization we # explicitly indicate that we want COW disks, regardless of what format # the original disks had. templates_service = system_service.templates_service() template = templates_service.add(template=types.Template( name='mytemplate', vm=types.Vm(id=vm.id, disk_attachments=[ types.DiskAttachment(disk=types.Disk( id=disk_id, sparse=True, format=types.DiskFormat.COW)) for disk_id in disk_ids ]))) # Wait till the status of the template is OK, as that means that it is # completely created and ready to use: template_service = templates_service.template_service(template.id) while True: time.sleep(5) template = template_service.get() if template.status == types.TemplateStatus.OK: break # Close the connection to the server: connection.close()
def main():
    """
    Entry point of the ovirt_template Ansible module: create/remove/
    export/import/register a template according to the `state` parameter
    and exit via module.exit_json / module.fail_json.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=[
                'present', 'absent', 'exported', 'imported', 'registered'
            ],
            default='present',
        ),
        id=dict(default=None),
        name=dict(default=None),
        vm=dict(default=None),
        description=dict(default=None),
        cluster=dict(default=None),
        allow_partial_import=dict(default=None, type='bool'),
        cpu_profile=dict(default=None),
        disks=dict(default=[], type='list'),
        clone_permissions=dict(type='bool'),
        export_domain=dict(default=None),
        storage_domain=dict(default=None),
        exclusive=dict(type='bool'),
        image_provider=dict(default=None),
        image_disk=dict(default=None, aliases=['glance_image_disk_name']),
        io_threads=dict(type='int', default=None),
        template_image_disk_name=dict(default=None),
        seal=dict(type='bool'),
        vnic_profile_mappings=dict(default=[], type='list'),
        cluster_mappings=dict(default=[], type='list'),
        role_mappings=dict(default=[], type='list'),
        domain_mappings=dict(default=[], type='list'),
        operating_system=dict(type='str'),
        memory=dict(type='str'),
        memory_guaranteed=dict(type='str'),
        memory_max=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['id', 'name']],
    )

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        templates_service = connection.system_service().templates_service()
        templates_module = TemplatesModule(
            connection=connection,
            module=module,
            service=templates_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = templates_module.create(
                result_state=otypes.TemplateStatus.OK,
                search_params=searchable_attributes(module),
                clone_permissions=module.params['clone_permissions'],
                seal=module.params['seal'],
            )
        elif state == 'absent':
            ret = templates_module.remove()
        elif state == 'exported':
            template = templates_module.search_entity()
            export_service = templates_module._get_export_domain_service()
            export_template = search_by_attributes(
                export_service.templates_service(), id=template.id)
            # Re-export only when the template is not yet in the export
            # domain, or when 'exclusive' forces it.
            ret = templates_module.action(
                entity=template,
                action='export',
                action_condition=lambda t: export_template is None or module.params['exclusive'],
                wait_condition=lambda t: t is not None,
                post_action=templates_module.post_export_action,
                storage_domain=otypes.StorageDomain(
                    id=export_service.get().id),
                exclusive=module.params['exclusive'],
            )
        elif state == 'imported':
            template = templates_module.search_entity()
            if template:
                # Template already exists; just ensure/return it.
                ret = templates_module.create(
                    result_state=otypes.TemplateStatus.OK,
                )
            else:
                kwargs = {}
                if module.params['image_provider']:
                    kwargs.update(
                        disk=otypes.Disk(
                            name=module.params['template_image_disk_name'] or module.params['image_disk']),
                        template=otypes.Template(name=module.params['name'], ),
                        import_as_template=True,
                    )
                if module.params['image_disk']:
                    # We need to refresh storage domain to get list of images:
                    templates_module._get_export_domain_service(
                    ).images_service().list()

                    glance_service = connection.system_service(
                    ).openstack_image_providers_service()
                    image_provider = search_by_name(
                        glance_service, module.params['image_provider'])
                    images_service = glance_service.service(
                        image_provider.id).images_service()
                else:
                    images_service = templates_module._get_export_domain_service(
                    ).templates_service()
                template_name = module.params['image_disk'] or module.params[
                    'name']
                entity = search_by_name(images_service, template_name)
                if entity is None:
                    raise Exception("Image/template '%s' was not found."
                                    % template_name)

                images_service.service(entity.id).import_(
                    storage_domain=otypes.StorageDomain(
                        name=module.params['storage_domain'])
                    if module.params['storage_domain'] else None,
                    cluster=otypes.Cluster(name=module.params['cluster'])
                    if module.params['cluster'] else None,
                    **kwargs)
                # Wait for template to appear in system:
                template = templates_module.wait_for_import(
                    condition=lambda t: t.status == otypes.TemplateStatus.OK)
                ret = templates_module.create(
                    result_state=otypes.TemplateStatus.OK)
                ret = {
                    'changed': True,
                    'id': template.id,
                    'template': get_dict_of_struct(template),
                }
        elif state == 'registered':
            storage_domains_service = connection.system_service(
            ).storage_domains_service()
            # Find the storage domain with unregistered template:
            sd_id = get_id_by_name(storage_domains_service,
                                   module.params['storage_domain'])
            storage_domain_service = storage_domains_service.storage_domain_service(
                sd_id)
            templates_service = storage_domain_service.templates_service()

            # Find the unregistered Template we want to register:
            templates = templates_service.list(unregistered=True)
            template = next(
                (t for t in templates
                 if (t.id == module.params['id']
                     or t.name == module.params['name'])),
                None)
            changed = False
            if template is None:
                template = templates_module.search_entity()
                if template is None:
                    raise ValueError(
                        "Template '%s(%s)' wasn't found."
                        % (module.params['name'], module.params['id']))
            else:
                # Register the template into the system:
                changed = True
                template_service = templates_service.template_service(
                    template.id)
                template_service.register(
                    allow_partial_import=module.params['allow_partial_import'],
                    cluster=otypes.Cluster(name=module.params['cluster'])
                    if module.params['cluster'] else None,
                    vnic_profile_mappings=_get_vnic_profile_mappings(module)
                    if module.params['vnic_profile_mappings'] else None,
                    registration_configuration=otypes.RegistrationConfiguration(
                        cluster_mappings=_get_cluster_mappings(module),
                        role_mappings=_get_role_mappings(module),
                        domain_mappings=_get_domain_mappings(module),
                    ) if (module.params['cluster_mappings']
                          or module.params['role_mappings']
                          or module.params['domain_mappings']) else None)

                if module.params['wait']:
                    template = templates_module.wait_for_import()
                else:
                    # Fetch template to initialize return.
                    template = template_service.get()
            ret = templates_module.create(
                result_state=otypes.TemplateStatus.OK)
            ret = {
                'changed': changed,
                'id': template.id,
                'template': get_dict_of_struct(template)
            }
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def create_transfer(
        connection, disk=None, direction=types.ImageTransferDirection.UPLOAD,
        host=None, backup=None, inactivity_timeout=None, timeout=60,
        disk_snapshot=None, shallow=None):
    """
    Create image transfer for upload to disk or download from disk.

    Arguments:
        connection (ovirtsdk4.Connection): connection to ovirt engine
        disk (ovirtsdk4.types.Disk): disk object. Not needed if
            disk_snapshot is specified.
        direction (ovirtsdk4.types.ImageTransferDirection): transfer
            direction (default UPLOAD)
        host (ovirtsdk4.types.Host): host object that should perform the
            transfer. If not specified engine will pick a random host.
        backup (ovirtsdk4.types.Backup): When downloading backup, the backup
            object owning the disks.
        inactivity_timeout (int): Number of seconds engine will wait for
            client activity before pausing the transfer. If not set, use
            engine default value.
        timeout (float, optional): number of seconds to wait for transfer
            to become ready.
        disk_snapshot (ovirtsdk4.types.DiskSnapshot): transfer a disk
            snapshot instead of current data of the disk.
        shallow (bool): Download only the specified image instead of the
            entire image chain. When downloading a disk transfer only the
            active disk snapshot data. When downloading a disk snapshot,
            transfer only the specified disk snapshot data.

    Returns:
        ovirtsdk4.types.ImageTransfer in phase TRANSFERRING

    Raises:
        RuntimeError: if the transfer is removed, fails, is paused by the
            system, enters an unexpected phase, or does not reach
            TRANSFERRING within `timeout` seconds.
    """
    log.info(
        "Creating image transfer for %s=%s, direction=%s host=%s backup=%s "
        "shallow=%s",
        "disk_snapshot" if disk_snapshot else "disk",
        disk_snapshot.id if disk_snapshot else disk.id,
        direction,
        host,
        backup,
        shallow,
    )

    # Create image transfer for disk or snapshot.
    transfer = types.ImageTransfer(
        host=host,
        direction=direction,
        backup=backup,
        inactivity_timeout=inactivity_timeout,
        # format=raw uses the NBD backend, enabling:
        # - Transfer raw guest data, regardless of the disk format.
        # - Automatic format conversion to remote disk format. For example,
        #   upload qcow2 image to raw disk, or raw image to qcow2 disk.
        # - Collapsed qcow2 chains to single raw file.
        # - Extents reporting for qcow2 images and raw images on file storage,
        #   speeding up downloads.
        format=types.DiskFormat.RAW,
        shallow=shallow,
    )
    if disk_snapshot:
        transfer.snapshot = types.DiskSnapshot(id=disk_snapshot.id)
    else:
        transfer.disk = types.Disk(id=disk.id)

    transfers_service = connection.system_service().image_transfers_service()

    # Add the new transfer to engine. This starts the transfer and returns a
    # transfer ID that can be used to track this image transfer.
    transfer = transfers_service.add(transfer)

    # You can use the transfer id to locate logs for this transfer.
    log.info("Transfer ID %s", transfer.id)

    # At this point the transfer owns the disk and will delete the disk if the
    # transfer is canceled, or if finalizing the transfer fails.

    transfer_service = transfers_service.image_transfer_service(transfer.id)

    start = time.time()

    # Poll once per second until the transfer reaches TRANSFERRING; any
    # terminal or unexpected phase aborts (canceling where appropriate).
    while True:
        time.sleep(1)
        try:
            transfer = transfer_service.get()
        except sdk.NotFoundError:
            # The system has removed the disk and the transfer.
            raise RuntimeError("Transfer {} was removed".format(transfer.id))

        if transfer.phase == types.ImageTransferPhase.FINISHED_FAILURE:
            # The system will remove the disk and the transfer soon.
            raise RuntimeError("Transfer {} has failed".format(transfer.id))

        if transfer.phase == types.ImageTransferPhase.PAUSED_SYSTEM:
            transfer_service.cancel()
            raise RuntimeError("Transfer {} was paused by system".format(
                transfer.id))

        if transfer.phase == types.ImageTransferPhase.TRANSFERRING:
            break

        if transfer.phase != types.ImageTransferPhase.INITIALIZING:
            transfer_service.cancel()
            raise RuntimeError("Unexpected transfer {} phase {}".format(
                transfer.id, transfer.phase))

        if time.time() > start + timeout:
            log.info("Cancelling transfer %s", transfer.id)
            transfer_service.cancel()
            raise RuntimeError("Timed out waiting for transfer {}".format(
                transfer.id))

    log.info("Transfer initialized in %.3f seconds", time.time() - start)

    # Log the transfer host name. This is very useful for troubleshooting.
    hosts_service = connection.system_service().hosts_service()
    host_service = hosts_service.host_service(transfer.host.id)
    transfer.host = host_service.get()

    log.info("Transfer host name: %s", transfer.host.name)

    return transfer
def add(self, memory, disk_size, cluster_name, storage_name,
        nic_name='eth0', network_interface='virtio',
        network_name='ovirtmgmt', disk_interface='virtio',
        disk_format='raw', template_name='Blank', timeout=300):
    """
    Create a VM with a single NIC and a single disk, then wait for it to
    reach the <Down> state.

    :param memory: VM's memory size such as 1024*1024*1024=1GB.
    :param disk_size: VM's disk size such as 512*1024=512MB.
    :param cluster_name: cluster name.
    :param storage_name: storage domain name.
    :param nic_name: VM's NIC name such as 'eth0'.
    :param network_interface: VM's network interface such as 'virtio'.
    :param network_name: network such as ovirtmgmt for ovirt, rhevm for
        rhel.
    :param disk_interface: VM's disk interface such as 'virtio'.
    :param disk_format: VM's disk format such as 'raw' or 'cow'.
    :param template_name: VM's template name, default is 'Blank'.
    :param timeout: seconds to wait for the VM to reach <Down> before
        giving up. Any failure (including the timeout) is logged, not
        re-raised.
    """
    deadline = time.time() + timeout

    # The management network is named 'ovirtmgmt' on ovirt, 'rhevm' on rhel.
    vm_spec = types.VM(
        name=self.name,
        memory=memory,
        cluster=self.connection.clusters.get(cluster_name),
        template=self.connection.templates.get(template_name),
    )
    domain = self.connection.storagedomains.get(storage_name)
    nic_spec = types.NIC(
        name=nic_name,
        network=types.Network(name=network_name),
        interface=network_interface,
    )
    disk_spec = types.Disk(
        storage_domains=types.StorageDomains(storage_domain=[domain]),
        size=disk_size,
        type_='system',
        status=None,
        interface=disk_interface,
        format=disk_format,
        sparse=True,
        bootable=True,
    )

    try:
        logging.info('Creating a VM %s' % self.name)
        self.connection.vms.add(vm_spec)
        logging.info('NIC is added to VM %s' % self.name)
        self.instance.nics.add(nic_spec)
        logging.info('Disk is added to VM %s' % self.name)
        self.instance.disks.add(disk_spec)
        logging.info('Waiting for VM to reach <Down> status')
        # Poll until the VM powers down; the while/else raises when the
        # deadline passes without a break.
        while time.time() < deadline:
            if self.is_dead():
                break
            time.sleep(1)
        else:
            raise WaitVMStateTimeoutError("DOWN", self.state())
    except Exception as e:
        logging.error('Failed to create VM with disk and NIC\n%s' % str(e))
# Get the reference to the service that manages the virtual machines: vms_service = system_service.vms_service() # Add a new virtual machine explicitly indicating the identifier of the # template version that we want to use and indicating that template disk # should be created on specific storage domain for the virtual machine: vm = vms_service.add( types.Vm( name='myvm', cluster=types.Cluster(name='mycluster'), template=types.Template(id=template_id), disk_attachments=[ types.DiskAttachment(disk=types.Disk( id=disk.id, format=types.DiskFormat.COW, storage_domains=[ types.StorageDomain(id=storage_domain.id, ), ], ), ), ], )) # Get a reference to the service that manages the virtual machine that # was created in the previous step: vm_service = vms_service.vm_service(vm.id) # Wait till the virtual machine is down, which indicats that all the # disks have been created: while True: time.sleep(5) vm = vm_service.get()