def disk_add(self, args):
    """Create a new COW disk and attach it to the VM named in *args*.

    Args:
        args: parsed CLI namespace; uses ``args.vm_name`` (engine search
            string) and ``args.size`` (disk size in GiB).
    """
    vms_service = self.c.system_service().vms_service()
    # NOTE(review): search is the bare VM name without a 'name=' prefix,
    # so this is a free-text engine search; [0] raises IndexError when
    # nothing matches -- confirm intended.
    vm = vms_service.list(search=args.vm_name)[0]
    disk_attachments_service = vms_service.vm_service(
        vm.id).disk_attachments_service()
    disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                name='mydisk',  # NOTE(review): hard-coded disk name -- confirm
                description='Created by titamu',
                format=types.DiskFormat.COW,
                # args.size is in GiB; provisioned_size is in bytes.
                provisioned_size=args.size * 2**30,
                storage_domains=[
                    # NOTE(review): 'storagedomain' is not defined in this
                    # function -- presumably a module-level name; verify.
                    types.StorageDomain(name=storagedomain, ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True,
        ),
    )
def test_hotunplug_disk(engine_api):
    """Hot-unplug DISK0 from VM0 and wait until it is reported inactive."""
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)
    with engine_utils.wait_for_event(engine, 2002):  # USER_HOTUNPLUG_DISK(2,002)
        correlation_id = 'test_hotunplug_disk'
        # Deactivating the attachment is what triggers the hot-unplug.
        assert disk_attachment.update(types.DiskAttachment(active=False),
                                      query={'correlation_id': correlation_id})
        # Wait for the engine-side job to finish, then for the disk and
        # the attachment to settle into their expected states.
        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
        assertions.assert_true_within_short(
            lambda: disk_attachment.get().active == False)
def connect_direct_lun(self, lun_name=None, lun_ip_addr=None, lun_port=None,
                       lun_iscsi_target=None, interface=None):
    """
    Connects a direct lun disk to the VM.

    Args:
        lun_name: name of LUN
        lun_ip_addr: LUN ip address
        lun_port: LUN port
        lun_iscsi_target: iscsi target
        interface: name of a ``types.DiskInterface`` member
            (defaults to 'VIRTIO' when None)
    """
    disk_attachments_service = self.api.disk_attachments_service()
    if not self.system.does_disk_exist(lun_name):
        # Disk does not exist yet: describe a brand-new shareable
        # direct-LUN disk backed by the given iSCSI target.
        disk_attachment = types.DiskAttachment(
            disk=types.Disk(
                name=lun_name,
                shareable=True,
                # NOTE(review): plain string 'raw' rather than
                # types.DiskFormat.RAW -- confirm the SDK accepts it.
                format='raw',
                lun_storage=types.HostStorage(
                    type=types.StorageType.ISCSI,
                    logical_units=[
                        types.LogicalUnit(
                            address=lun_ip_addr,
                            port=lun_port,
                            target=lun_iscsi_target,
                        )
                    ]
                )
            ),
            interface=types.DiskInterface(
                getattr(types.DiskInterface, interface or 'VIRTIO')),
            active=True
        )
    else:
        # Disk already exists: reuse its current attachment description.
        disk_attachment = self._get_disk_attachment_service(lun_name).get()
    disk_attachments_service.add(disk_attachment)
    # Block until the attached disk reports a healthy status.
    wait_for(
        self._is_disk_ok,
        func_args=[disk_attachment.disk.id],
        delay=5,
        num_sec=900,
        message="check if disk is attached"
    )
    return True
def add_directlun(prefix):
    """Attach a direct-LUN iSCSI disk (DLUN_DISK_NAME) to VM0 and verify
    that the attachment exists."""
    # Find LUN GUIDs
    ret = prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME).ssh(
        ['cat', '/root/multipath.txt'])
    nt.assert_equals(ret.code, 0)
    all_guids = ret.out.splitlines()
    lun_guid = all_guids[
        SD_ISCSI_NR_LUNS]  # Take the first unused LUN. 0-(SD_ISCSI_NR_LUNS) are used by iSCSI SD
    dlun_params = types.Disk(
        name=DLUN_DISK_NAME,
        format=types.DiskFormat.RAW,
        lun_storage=types.HostStorage(
            type=types.StorageType.ISCSI,
            logical_units=[
                types.LogicalUnit(
                    address=prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME).ip(),
                    port=SD_ISCSI_PORT,
                    target=SD_ISCSI_TARGET,
                    id=lun_guid,
                    username='******',
                    password='******',
                )
            ]),
        sgio=types.ScsiGenericIO.UNFILTERED,
    )
    api = prefix.virt_env.engine_vm().get_api_v4()
    vm_service = _get_vm_service(api.system_service(), VM0_NAME)
    disk_attachments_service = vm_service.disk_attachments_service()
    disk_attachments_service.add(
        types.DiskAttachment(disk=dlun_params,
                             interface=types.DiskInterface.VIRTIO_SCSI))
    # Look the attachment up again by disk id; None means attaching failed.
    disk_service = _get_disk_service(api.system_service(), DLUN_DISK_NAME)
    attachment_service = disk_attachments_service.attachment_service(
        disk_service.get().id)
    nt.assert_not_equal(attachment_service.get(), None,
                        'Direct LUN disk not attached')
def add_disk_to_vm(self, vm_name, storage_domain=None, size=None,
                   interface=None, format=None, active=True):
    """
    Args:
        vm_name: string name
        storage_domain: string name of the storage domain (datastore)
        size: integer size of disk in bytes, ex 8GB: 8*1024*1024*1024
        interface: string disk interface type
        format: string disk format type
        active: boolean whether the disk is active

    Returns: None

    Notes:
        Disk format and interface type definitions, and their valid values,
        can be found in ovirtsdk documentation:
        http://ovirt.github.io/ovirt-engine-sdk/4.1/types.m.html#ovirtsdk4.types.DiskInterface
        http://ovirt.github.io/ovirt-engine-sdk/4.1/types.m.html#ovirtsdk4.types.DiskFormat
    """
    disk_attachments_service = self._get_disk_attachments_service(vm_name)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(disk=types.Disk(
            format=types.DiskFormat(format),
            provisioned_size=size,
            storage_domains=[types.StorageDomain(name=storage_domain, )]),
                             interface=types.DiskInterface(interface),
                             active=active))
    # Block until the newly attached disk reports a healthy status.
    wait_for(self._check_disk,
             func_args=[disk_attachment.disk.id],
             delay=5,
             num_sec=900,
             message="check if disk is attached")
def add_disk(sparse=False):
    """Attach a new 80 GiB RAW disk to the VM named VM_NAME.

    The disk is thin-provisioned when ``sparse`` is True, preallocated
    otherwise, and is created on ``storage_domain_name``.

    Returns the created ``types.Disk``.
    """
    vms_service = connection.system_service().vms_service()
    vm = vms_service.list(search='name=' + VM_NAME)[0]
    attachments = vms_service.vm_service(vm.id).disk_attachments_service()

    # Describe the disk first, then wrap it in an attachment.
    new_disk = types.Disk(
        name=template_name + 'disk',
        description='Thinly-provisioned' if sparse else 'Preallocated',
        format=types.DiskFormat.RAW,
        provisioned_size=80 * 2**30,
        storage_domains=[types.StorageDomain(name=storage_domain_name, )],
        sparse=sparse,
    )
    attachment = attachments.add(
        types.DiskAttachment(
            disk=new_disk,
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True,
        ),
    )
    return attachment.disk
def add_disk(api):
    """Create DISK0 (sparse COW on the NFS domain), attach it to VM0 as the
    bootable disk, and wait until the attachment is active and the disk OK."""
    engine = api.system_service()
    # NOTE(review): vm0_service is assigned but never used below.
    vm0_service = test_utils.get_vm_service(engine, VM0_NAME)
    vm0_disk_attachments_service = test_utils.get_disk_attachments_service(engine, VM0_NAME)
    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                name=DISK0_NAME,
                format=types.DiskFormat.COW,
                initial_size=10 * GB,
                # NOTE(review): provisioned_size of 1 byte alongside a
                # 10 GB initial_size -- confirm intentional for this test.
                provisioned_size=1,
                sparse=True,
                storage_domains=[
                    types.StorageDomain(
                        name=SD_NFS_NAME,
                    ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ),
    )
    disk0_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk0_attachment_service = vm0_disk_attachments_service.attachment_service(disk0_service.get().id)
    # Wait for the attachment to become active and the disk to reach OK.
    testlib.assert_true_within_long(
        lambda: disk0_attachment_service.get().active == True
    )
    testlib.assert_true_within_long(
        lambda: disk0_service.get().status == types.DiskStatus.OK
    )
def _backup_snapshot_disks(self, snapshot_service, backup_vm_date_dir):
    """Attach each disk of the given snapshot to the agent VM, copy its
    contents into *backup_vm_date_dir*, then release the attachment.

    Args:
        snapshot_service: SDK service for the snapshot being backed up.
        backup_vm_date_dir: target directory for the copied images.
    """
    disks_service = snapshot_service.disks_service()
    snapshot = snapshot_service.get()
    attachments_service = self._agent_vm_service.disk_attachments_service()
    for snapshot_disk in disks_service.list():
        # Attach the snapshot's version of the disk (not the live disk)
        # to the agent VM.
        attachment = attachments_service.add(
            attachment=types.DiskAttachment(
                disk=types.Disk(id=snapshot_disk.id,
                                snapshot=types.Snapshot(id=snapshot.id)),
                active=True,
                bootable=False,
                interface=types.DiskInterface.VIRTIO))
        image_id = snapshot_disk.image_id
        image_size = snapshot_disk.provisioned_size
        image_description = snapshot_disk.description if snapshot_disk.description else 'None'
        logging.info('Attach disk {}'.format(attachment.disk.id))
        # NOTE(review): AutoAttachmentService presumably detaches the
        # disk on exit -- confirm against its implementation.
        with AutoAttachmentService(attachments_service, attachment):
            self._copy_disk(attachment,
                            directory=backup_vm_date_dir,
                            image_id=image_id,
                            image_size=image_size,
                            image_description=image_description)
def add_disk(self, name, size, pool=None, thin=True, template=None,
             shareable=False, existing=None):
    """Create a new COW disk on *pool* and attach it to VM *name*.

    :param name: name of an existing VM.
    :param size: disk size in GiB.
    :param pool: name of the storage domain to create the disk on.
    :param thin: unused, kept for interface compatibility.
    :param template: unused, kept for interface compatibility.
    :param shareable: unused, kept for interface compatibility.
    :param existing: unused, kept for interface compatibility.
    :return: ``{'result': 'success'}`` on success, otherwise
        ``{'result': 'failure', 'reason': ...}``.
    """
    size *= 2**30  # GiB -> bytes
    system_service = self.conn.system_service()
    sds_service = system_service.storage_domains_service()
    poolcheck = sds_service.list(search='name=%s' % pool)
    if not poolcheck:
        return {'result': 'failure', 'reason': "Pool %s not found" % pool}
    vmsearch = self.vms_service.list(search='name=%s' % name)
    if not vmsearch:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    vm = self.vms_service.vm_service(vmsearch[0].id)
    disk_attachments_service = vm.disk_attachments_service()
    # Name the new disk after its position among the VM's current disks.
    currentdisk = len(disk_attachments_service.list())
    diskindex = currentdisk + 1
    diskname = '%s_Disk%s' % (name, diskindex)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(disk=types.Disk(
            name=diskname,
            format=types.DiskFormat.COW,
            provisioned_size=size,
            storage_domains=[types.StorageDomain(name=pool)]),
                             interface=types.DiskInterface.VIRTIO,
                             bootable=False,
                             active=True))
    disks_service = self.conn.system_service().disks_service()
    disk_service = disks_service.disk_service(disk_attachment.disk.id)
    # Poll every 5s until the disk leaves the locked state, giving up
    # after roughly 40 seconds.
    timeout = 0
    while True:
        disk = disk_service.get()
        if disk.status == types.DiskStatus.OK:
            break
        timeout += 5
        sleep(5)
        common.pprint("Waiting for disk %s to be ready" % diskname,
                      color='green')
        if timeout > 40:
            return {
                'result': 'failure',
                'reason': 'timeout waiting for disk %s to be ready' % diskname
            }
    # BUG FIX: the original implicitly returned None on success, while
    # every other exit path returns a result dict.
    return {'result': 'success'}
def snapshot_merge(api):
    """Create two snapshots of VM0's DISK0, then remove the older one so the
    engine performs a snapshot merge, and verify the remaining chain is OK."""
    engine = api.system_service()
    vm0_snapshots_service = test_utils.get_vm_snapshots_service(engine, VM0_NAME)
    disk = engine.disks_service().list(search='name={}'.format(DISK0_NAME))[0]
    # First throwaway snapshot covering only DISK0, no memory state.
    dead_snap1_params = types.Snapshot(
        description='dead_snap1',
        persist_memorystate=False,
        disk_attachments=[
            types.DiskAttachment(
                disk=types.Disk(
                    id=disk.id
                )
            )
        ]
    )
    correlation_id = uuid.uuid4()
    vm0_snapshots_service.add(
        dead_snap1_params,
        query={'correlation_id': correlation_id}
    )
    testlib.assert_true_within_short(
        lambda: test_utils.all_jobs_finished(engine, correlation_id)
    )
    testlib.assert_true_within_short(
        lambda: vm0_snapshots_service.list()[-1].snapshot_status == types.SnapshotStatus.OK
    )
    # Second snapshot on top of the first one.
    dead_snap2_params = types.Snapshot(
        description='dead_snap2',
        persist_memorystate=False,
        disk_attachments=[
            types.DiskAttachment(
                disk=types.Disk(
                    id=disk.id
                )
            )
        ]
    )
    correlation_id_snap2 = uuid.uuid4()
    vm0_snapshots_service.add(
        dead_snap2_params,
        query={'correlation_id': correlation_id_snap2}
    )
    testlib.assert_true_within_short(
        lambda: test_utils.all_jobs_finished(engine, correlation_id_snap2)
    )
    testlib.assert_true_within_short(
        lambda: vm0_snapshots_service.list()[-1].snapshot_status == types.SnapshotStatus.OK
    )
    # Remove the second-to-last snapshot, which triggers the merge; two
    # snapshots should then remain, the newest of which must be OK.
    snapshot = vm0_snapshots_service.list()[-2]
    vm0_snapshots_service.snapshot_service(snapshot.id).remove()
    testlib.assert_true_within_short(
        lambda: (len(vm0_snapshots_service.list()) == 2) and
                (vm0_snapshots_service.list()[-1].snapshot_status ==
                 types.SnapshotStatus.OK),
    )
def create_snap(self, vmid, snapname, my_disk):
    """Create a snapshot of one disk of VM *vmid* and block until it is
    no longer locked.

    Args:
        vmid: id of the VM to snapshot.
        snapname: description for the new snapshot.
        my_disk: id of the disk to include in the snapshot.
    """
    vm_service = self.connection.service("vms")
    snapshots_service = vm_service.vm_service(vmid).snapshots_service()
    # Snapshot only the given disk; memory state is not preserved.
    snapshots_service.add(types.Snapshot(description=snapname,
                                         persist_memorystate=False,
                                         disk_attachments=[
                                             types.DiskAttachment(
                                                 disk=types.Disk(
                                                     id=my_disk))
                                         ]))
    snapid = self.get_snap_id(vmid)
    status = self.get_snap_status(vmid, snapid)
    printf.INFO("Trying to create snapshot of VM: " + vmid)
    # Poll every 10s until the snapshot leaves the 'locked' state.
    while str(status) == "locked":
        time.sleep(10)
        printf.INFO("Waiting until snapshot creation ends")
        status = self.get_snap_status(vmid, snapid)
    printf.OK("Snapshot created")
vm = vms_service.list(search='name=myvm')[0] # Locate the service that manages the disk attachments of the virtual # machine: disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() # Use the "add" method of the disk attachments service to add the disk: disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( name='mydisk', description='My disk', format=types.DiskFormat.COW, provisioned_size=1 * 2**20, storage_domains=[ types.StorageDomain(name='om03', ), ], ), interface=types.DiskInterface.VIRTIO, bootable=False, ), ) # Wait till the disk is OK: disks_service = connection.system_service().disks_service() disk_service = disks_service.disk_service(disk_attachment.disk.id) while True: time.sleep(5) disk = disk_service.get() if disk.status == types.DiskStatus.OK: break
# machine: disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() # Use the "add" method of the disk attachments service to add the LUN disk. disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( name='myiscsidisk', lun_storage=types.HostStorage( type=types.StorageType.ISCSI, logical_units=[ types.LogicalUnit( address='192.168.1.1', port=3260, target='iqn.2017-05.org.ovirt:storage', id='36001405d6c6cbba754c4b568d843ff6a', username='******', password='******', ) ], ), ), interface=types.DiskInterface.VIRTIO, bootable=False, active=True, ), ) # Close the connection to the server: connection.close()
# the template using a format different to the format used by the # original disks. attachments = connection.follow_link(vm.disk_attachments) disk_ids = [attachment.disk.id for attachment in attachments] # Send the request to create the template. Note that the way to specify # the original virtual machine, and the customizations, is to use the # 'vm' attribute of the 'Template' type. In the customization we # explicitly indicate that we want COW disks, regardless of what format # the original disks had. templates_service = system_service.templates_service() template = templates_service.add(template=types.Template( name='mytemplate', vm=types.Vm(id=vm.id, disk_attachments=[ types.DiskAttachment(disk=types.Disk( id=disk_id, sparse=True, format=types.DiskFormat.COW)) for disk_id in disk_ids ]))) # Wait till the status of the template is OK, as that means that it is # completely created and ready to use: template_service = templates_service.template_service(template.id) while True: time.sleep(5) template = template_service.get() if template.status == types.TemplateStatus.OK: break # Close the connection to the server: connection.close()
# Get the reference to the service that manages the virtual machines: vms_service = system_service.vms_service() # Add a new virtual machine explicitly indicating the identifier of the # template version that we want to use and indicating that template disk # should be created on specific storage domain for the virtual machine: vm = vms_service.add( types.Vm( name='myvm', cluster=types.Cluster(name='mycluster'), template=types.Template(id=template_id), disk_attachments=[ types.DiskAttachment(disk=types.Disk( id=disk.id, format=types.DiskFormat.COW, storage_domains=[ types.StorageDomain(id=storage_domain.id, ), ], ), ), ], )) # Get a reference to the service that manages the virtual machine that # was created in the previous step: vm_service = vms_service.vm_service(vm.id) # Wait till the virtual machine is down, which indicats that all the # disks have been created: while True: time.sleep(5) vm = vm_service.get()
# Find the disk attachment for the disk we are interested on: disk_attachments_service = vm_service.disk_attachments_service() disk_attachments = disk_attachments_service.list() disk_attachment = next((a for a in disk_attachments if a.disk.id == disk.id), None) # Deactivate the disk we found # or print an error if there is no such disk attached: if disk_attachment is not None: # Locate the service that manages the disk attachment that we found # in the previous step: disk_attachment_service = disk_attachments_service.attachment_service( disk_attachment.id) # Deactivate the disk attachment disk_attachment_service.update(types.DiskAttachment(active=False)) # Wait till the disk attachment not active: while True: time.sleep(5) disk_attachment = disk_attachment_service.get() if disk_attachment.active == False: break else: print("There's no disk attachment for %s." % disk.name) # Close the connection to the server: connection.close()
#vm.externalhostproviders = types.ExternalHostProvider(id='d78051b5-37c8-43bc-8eeb-04e49e59bf3') #vm.memory_policy=types.MemoryPolicy( guaranteed=VMMemoryGuaranteed ) vms_service.add(vm) #################################### #vms_service = connection.system_service().vms_service() vm = vms_service.list(search='name=' + VMName)[0] disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( format=types.DiskFormat.COW, provisioned_size=VMCapacity, storage_domains=[ types.StorageDomain(name=VMDomain, ), ], ), interface=types.DiskInterface.VIRTIO_SCSI, bootable=True, active=True, ), ) ################################### netlist = connection.system_service().vnic_profiles_service().list( search=VMNetwork)[0] nics_service = vms_service.vm_service(vm.id).nics_service() nics_service.add( types.Nic( name='nic1', interface=types.NicInterface.VIRTIO, # network=types.Network(name='ovirtmgmt'), vnic_profile=types.VnicProfile(id=netlist.id),
def restore(connection, AGENT_VM_NAME="obackup", DATA_VM_NAME, APPLICATION_NAME="obackup", DATA_DISKS, CONF, sd, vm_config):
    """Back up DATA_VM_NAME by snapshotting it, attaching the snapshot's
    disks to the agent VM and driving backy2 over them, then cleaning up.

    NOTE(review): this function is not valid Python as written --
    non-default parameters (DATA_VM_NAME, DATA_DISKS, CONF, sd, vm_config)
    follow parameters with defaults, `soundcard_enabled = types.` below is
    a truncated expression, and several names used in the body (data_vm,
    cluster, nics, data_vm_disks, backy2) are never defined here (the code
    that looked up data_vm is commented out). It appears to be work in
    progress; the original text is preserved verbatim.
    """
    # Get the reference to the root of the services tree:
    system_service = connection.system_service()
    # Get the reference to the service that we will use to send events to
    # the audit log:
    events_service = system_service.events_service()
    # In order to send events we need to also send unique integer ids. These
    # should usually come from an external database, but in this example we
    # will just generate them from the current time in seconds since Jan 1st
    # 1970.
    event_id = int(time.time())
    # Get the reference to the service that manages the virtual machines:
    vms_service = system_service.vms_service()
    # # Find the virtual machine that we want to back up. Note that we need to
    # # use the 'all_content' parameter to retrieve the retrieve the OVF, as
    # # it isn't retrieved by default:
    # data_vm = vms_service.list(
    #     search='name=%s' % DATA_VM_NAME,
    #     all_content=True,
    # )[0]
    # logging.info(
    #     'Found data virtual machine \'%s\', the id is \'%s\'.',
    #     data_vm.name, data_vm.id,
    # )
    # vm_dict = {
    # NOTE(review): the dict opener above is commented out while its
    # entries below are not -- this region is syntactically broken.
    "name" : data_vm.name,
    "cluster" : data_vm.cluster.id,
    "soundcard_enabled" : data_vm.soundcard_enabled,
    "cpu" : {
        "architecture" : data_vm.cpu.architecture.name,
        "sockets" : data_vm.cpu.topology.sockets,
        "cores" : data_vm.cpu.topology.cores,
        "threads" : data_vm.cpu.topology.threads
    },
    "memory" : data_vm.memory,
    "memory_policy" : {
        "max" : data_vm.memory_policy.max,
        "ballooning" : data_vm.memory_policy.ballooning,
        "guaranteed" : data_vm.memory_policy.guaranteed,
    },
    "description" : data_vm.description,
    "nics" : nics,
    "disk_attachments" : data_vm_disks,
    "display" : data_vm.display.type.name,
    "time_zone" : data_vm.time_zone.name,
    "os" : data_vm.os.type,
    "type" : data_vm.type.name,
    "virtio_scsi" : data_vm.virtio_scsi.enabled,
    "high_availability" : data_vm.high_availability.enabled,
    }
    vms_service.add(
        types.Vm(
            name = DATA_VM_NAME + '_restored',
            cluster = types.Cluster(name=cluster),
            template = types.Template(name='Blank'),
            # NOTE(review): truncated -- 'types.' is an incomplete expression.
            soundcard_enabled = types.
        ),
    )
    logging.info(
        'Creating VM %s_restored', DATA_VM_NAME,
    )
    # Find the virtual machine were we will attach the disks in order to do
    # the backup:
    agent_vm = vms_service.list(
        search='name=%s' % AGENT_VM_NAME,
    )[0]
    logging.info(
        'Found agent virtual machine \'%s\', the id is \'%s\'.',
        agent_vm.name, agent_vm.id,
    )
    # Find the services that manage the data and agent virtual machines:
    data_vm_service = vms_service.vm_service(data_vm.id)
    agent_vm_service = vms_service.vm_service(agent_vm.id)
    # Create an unique description for the snapshot, so that it is easier
    # for the administrator to identify this snapshot as a temporary one
    # created just for backup purposes:
    snap_description = '%s_obackup_%s' % (data_vm.name, uuid.uuid4())
    # Send an external event to indicate to the administrator that the
    # backup of the virtual machine is starting. Note that the description
    # of the event contains the name of the virtual machine and the name of
    # the temporary snapshot, this way, if something fails, the administrator
    # will know what snapshot was used and remove it manually.
    events_service.add(
        event=types.Event(
            vm=types.Vm(
                id=data_vm.id,
            ),
            origin=APPLICATION_NAME,
            severity=types.LogSeverity.NORMAL,
            custom_id=event_id,
            description=(
                'Backup of virtual machine \'%s\' using snapshot \'%s\' is '
                'starting.' % (data_vm.name, snap_description)
            ),
        ),
    )
    event_id += 1
    # Save the OVF to a file, so that we can use to restore the virtual
    # machine later. The name of the file is the name of the virtual
    # machine, followed by a dash and the identifier of the virtual machine,
    # to make it unique:
    ovf_data = data_vm.initialization.configuration.data
    ovf_file = '%s-%s.ovf' % (data_vm.name, data_vm.id)
    with open(ovf_file, 'w') as ovs_fd:
        # NOTE(review): writing encoded bytes to a text-mode file raises
        # TypeError on Python 3 -- confirm the target interpreter.
        ovs_fd.write(ovf_data.encode('utf-8'))
    logging.info('Wrote OVF to file \'%s\'.', os.path.abspath(ovf_file))
    # Send the request to create the snapshot. Note that this will return
    # before the snapshot is completely created, so we will later need to
    # wait till the snapshot is completely created.
    # The snapshot will not include memory. Change to True the parameter
    # persist_memorystate to get it (in that case the VM will be paused for a while).
    snaps_service = data_vm_service.snapshots_service()
    snap = snaps_service.add(
        snapshot=types.Snapshot(
            description=snap_description,
            persist_memorystate=False,
        ),
    )
    logging.info(
        'Sent request to create snapshot \'%s\', the id is \'%s\'.',
        snap.description, snap.id,
    )
    # Poll and wait till the status of the snapshot is 'ok', which means
    # that it is completely created:
    snap_service = snaps_service.snapshot_service(snap.id)
    while snap.snapshot_status != types.SnapshotStatus.OK:
        logging.info(
            'Waiting till the snapshot is created, the satus is now \'%s\'.',
            snap.snapshot_status,
        )
        time.sleep(1)
        snap = snap_service.get()
    logging.info('The snapshot is now complete.')
    # Retrieve the descriptions of the disks of the snapshot:
    snap_disks_service = snap_service.disks_service()
    snap_disks = snap_disks_service.list()
    # Attach all the disks of the snapshot to the agent virtual machine, and
    # save the resulting disk attachments in a list so that we can later
    # detach them easily:
    attachments_service = agent_vm_service.disk_attachments_service()
    attachments = []
    attachment_map = {}
    # NOTE(review): on Python 3, map() returns an iterator, so the
    # alphabet[blkid] subscript below raises TypeError -- it would need
    # list(map(chr, range(97, 123))).
    alphabet = map(chr, range(97, 123))
    blkid = 0
    for snap_disk in snap_disks:
        # Only attach the disks the caller asked to back up.
        if snap_disk.id in DATA_DISKS:
            blkid += 1
            attachment = attachments_service.add(
                attachment=types.DiskAttachment(
                    disk=types.Disk(
                        id=snap_disk.id,
                        snapshot=types.Snapshot(
                            id=snap.id,
                        ),
                    ),
                    active=True,
                    bootable=False,
                    interface=types.DiskInterface.VIRTIO,
                ),
            )
            attachments.append(attachment)
            attachment_map["vd" + alphabet[blkid]] = attachment
            logging.info(
                'Attached disk \'%s\' to the agent virtual machine.',
                attachment.disk.id,
            )
    # Insert here the code to contact the backup agent and do the actual
    # backup ...
    logging.info('Doing the actual backup ...')
    for vdisk in attachment_map.keys():
        logging.info(
            'Backing up disk \'%s\'.', attachment_map[vdisk].id
        )
        backup_status = backy2.backup_disk(vdisk, attachment_map[vdisk].id,
                                           snap_description, data_vm.name)
        if backup_status == "Failed":
            logging.error(
                'Failed to back up VM %s Disk %s, check backy2 logs',
                data_vm.name, attachment_map[vdisk].id
            )
            # NOTE(review): events_service.add() normally takes a
            # types.Event; passing types.Fault here, and a tuple (not a
            # formatted string) as description, both look wrong -- confirm.
            events_service.add(
                event=types.Fault(
                    vm=types.Vm(
                        id=data_vm.id,
                    ),
                    origin=APPLICATION_NAME,
                    severity=types.LogSeverity.ERROR,
                    custom_id=event_id,
                    description=(
                        'Backup of /VM \'%s\' disk %s failed',
                        data_vm.name, attachment_map[vdisk].id
                    ),
                ),
            )
            event_id += 1
    # Detach the disks from the agent virtual machine:
    for attachment in attachments:
        attachment_service = attachments_service.attachment_service(attachment.id)
        attachment_service.remove()
        logging.info(
            'Detached disk \'%s\' from the agent virtual machine.',
            attachment.disk.id,
        )
    # Remove the snapshot:
    snap_service.remove()
    logging.info('Removed the snapshot \'%s\'.', snap.description)
    # Send an external event to indicate to the administrator that the
    # backup of the virtual machine is completed:
    events_service.add(
        event=types.Event(
            vm=types.Vm(
                id=data_vm.id,
            ),
            origin=APPLICATION_NAME,
            severity=types.LogSeverity.NORMAL,
            custom_id=event_id,
            description=(
                'Backup of virtual machine \'%s\' using snapshot \'%s\' is '
                'completed.' % (data_vm.name, snap_description)
            ),
        ),
    )
    event_id += 1
def create_and_run_vm():
    """Create VM_NAME from the requested template version, wait for its
    disks to be cloned, then start it and wait until it is up.

    Exits the interpreter (via quit()) when the template or the requested
    template version cannot be found.
    """
    connection = get_connection()
    system_service = connection.system_service()
    storage_domains_service = system_service.storage_domains_service()
    storage_domain = storage_domains_service.list(search='name=' + storage_domain_name)[0]
    templates_service = system_service.templates_service()
    templates = templates_service.list(search='name=' + template_name)
    if not templates:
        print("could not find the required template: {}".format(template_name))
        quit()
    # Pick the template whose version number matches template_version.
    template_id = None
    for template in templates:
        if template.version.version_number == template_version:
            template_id = template.id
    # BUG FIX: the original fell through with template_id=None when no
    # version matched and then failed obscurely inside the SDK; fail
    # explicitly instead, mirroring the missing-template handling above.
    if template_id is None:
        print("could not find version {} of template: {}".format(
            template_version, template_name))
        quit()
    template_service = templates_service.template_service(template_id)
    disk_attachments = connection.follow_link(
        template_service.get().disk_attachments)
    disk = disk_attachments[0].disk
    vms_service = system_service.vms_service()
    # Clone the template disk as COW onto the chosen storage domain.
    vm = vms_service.add(
        types.Vm(
            name=VM_NAME,
            cluster=types.Cluster(name='Default'),
            template=types.Template(id=template_id),
            disk_attachments=[
                types.DiskAttachment(disk=types.Disk(
                    id=disk.id,
                    format=types.DiskFormat.COW,
                    storage_domains=[
                        types.StorageDomain(id=storage_domain.id, ),
                    ],
                ), ),
            ],
        ))
    vm_service = vms_service.vm_service(vm.id)
    # The VM reaches DOWN once its disks have finished being created.
    while True:
        time.sleep(10)
        vm = vm_service.get()
        if vm.status == types.VmStatus.DOWN:
            break
    vm_service.start()
    while True:
        time.sleep(10)
        vm = vm_service.get()
        if vm.status == types.VmStatus.UP:
            break
    connection.close()
# machine: disk_attachments_service = vms_service.vm_service( vm.id).disk_attachments_service() # Use the "add" method of the disk attachments service to add the disk. # Note that the size of the disk, the `provisioned_size` attribute, is # specified in bytes, so to create a disk of 10 GiB the value should # be 10 * 2^30. disk_attachment = disk_attachments_service.add( types.DiskAttachment( disk=types.Disk( name='mydisk', description='my disk', format=types.DiskFormat.COW, provisioned_size=10 * 2**30, storage_domains=[ types.StorageDomain(name='bs-scsi-012', ), ], ), interface=types.DiskInterface.VIRTIO, bootable=False, active=True, ), ) # Find the service that manages the disk attachment that was added in the # previous step: disk_attachment_service = disk_attachments_service.attachment_service( disk_attachment.id) # Wait till the disk is OK: disks_service = connection.system_service().disks_service() disk_service = disks_service.disk_service(disk_attachment.disk.id)
def add_disk(
    self,
    vm,
    size,
    disk_format=RHV_DISK_FORMAT_RAW,
    disk_interface=RHV_DISK_INTERFACE_VIRTIO_SCSI,
    sparse=None,
    pass_discard=None,
    storage_domain_id=None,
    timeout=120,
):
    """
    Attach a newly created disk to a VM and wait until it is usable.

    Args:
        vm (types.Vm): VM instance that receives the disk
        size (int): size of the disk in GB
        disk_format (str): name of a ``types.DiskFormat`` member for the
            underlying storage format (default: "RAW")
        disk_interface (str): name of a ``types.DiskInterface`` member
            for the controller interface (default: 'VIRTIO_SCSI')
        sparse (bool): True for sparse allocation, False for
            preallocated, None to leave unset (default: None)
        pass_discard (bool): whether the VM passes discard commands to
            the storage (default: None)
        storage_domain_id (str): storage-domain identifier; falls back
            to ``config.ENV_DATA["ovirt_storage_domain_id"]``
        timeout (int): seconds to wait for the disk to reach OK
            (default: 120)
    """
    logger.info(f"Adding disk to {vm.name}")
    size_in_bytes = int(size) * GB
    storage_domain_id = (
        storage_domain_id or config.ENV_DATA["ovirt_storage_domain_id"]
    )
    attachments_service = self.get_disk_attachments_service(vm.id)

    # Describe the disk first, then attach it as a non-bootable, active disk.
    new_disk = types.Disk(
        format=getattr(types.DiskFormat, disk_format),
        provisioned_size=size_in_bytes,
        sparse=sparse,
        storage_domains=[types.StorageDomain(id=storage_domain_id, )],
    )
    attachment = attachments_service.add(
        types.DiskAttachment(
            disk=new_disk,
            interface=getattr(types.DiskInterface, disk_interface),
            bootable=False,
            active=True,
            pass_discard=pass_discard,
        ),
    )

    # Poll the disk every 3 seconds until it reports OK or we time out.
    disk_service = self.get_disk_service(attachment.disk.id)
    try:
        for sample in TimeoutSampler(timeout, 3, disk_service.get):
            logger.info(f"Waiting for disk status to be OK. "
                        f"Current disk status: {sample.status}")
            if sample.status == types.DiskStatus.OK:
                logger.info(f"Disk {sample.name} reached OK status")
                break
    except TimeoutExpiredError:
        logger.error(
            f"Disk {sample.name} failed to get attached to {vm.name}")
        raise
    logger.info(f"{size}GB disk added successfully to {vm.name}")
# Retrieve the descriptions of the disks of the snapshot: snap_disks_service = snap_service.disks_service() snap_disks = snap_disks_service.list() # Attach all the disks of the snapshot to the agent virtual machine, and # save the resulting disk attachments in a list so that we can later # detach them easily: attachments_service = agent_vm_service.disk_attachments_service() attachments = [] for snap_disk in snap_disks: attachment = attachments_service.add(attachment=types.DiskAttachment( disk=types.Disk( id=snap_disk.id, snapshot=types.Snapshot(id=snap.id, ), ), active=True, bootable=False, interface=types.DiskInterface.VIRTIO, ), ) attachments.append(attachment) logging.info( 'Attached disk \'%s\' to the agent virtual machine.', attachment.disk.id, ) # Now the disks are attached to the virtual agent virtual machine, we # can then ask that virtual machine to perform the backup. Doing that # requires a mechanism to talk to the backup software that runs inside the # agent virtual machine. That is outside of the scope of the SDK. But if # the guest agent is installed in the virtual machine then we can
def main():
    """Parse CLI arguments, create a VM on the first cluster found, attach
    its NICs and disks, and optionally kick off an automated OS install."""
    parser = option_parser()
    args = parser.parse_args()
    if not early_option_check(args):
        sys.exit(-1)
    # Create the connection to the server:
    # (the @...@ placeholders are substituted at install/deploy time)
    connection = sdk.Connection(
        url='https://@ENGINE_FQDN@/ovirt-engine/api',
        username='******',
        password=base64.b64decode('@ENGINEPASS_BASE64@'),
        ca_file='@CA_PEM@',
        debug=False,
    )
    vms_service = connection.system_service().vms_service()
    # NOTE(review): always uses the first cluster reported by the engine.
    cluster = connection.system_service().clusters_service().list()[0]
    clustername = cluster.name
    dcs_service = connection.system_service().data_centers_service()
    dc = dcs_service.list(search='Clusters.name=%s' % cluster.name)[0]
    networks_service = dcs_service.service(dc.id).networks_service()
    profiles_service = connection.system_service().vnic_profiles_service()
    if not later_option_check(args, connection):
        connection.close()
        sys.exit(-1)
    # Short aliases for common engine OS identifiers.
    shorthand = {
        'rhel6': 'rhel_6x64',
        'rhel7': 'rhel_7x64',
        'rhel8': 'rhel_8x64',
        'ubuntu': 'ubuntu_14_04',
        'debian': 'debian_7',
    }
    vmtype = {
        'server': types.VmType.SERVER,
        'desktop': types.VmType.DESKTOP,
        'high_performance': types.VmType.HIGH_PERFORMANCE
    }
    # Creating new virtual machine
    vm = types.Vm()
    vm.name = args.name
    vm.cluster = types.Cluster(name=clustername)
    vm.template = types.Template(name=args.template)
    if args.os in shorthand.keys():
        vm.os = types.OperatingSystem(type=shorthand[args.os])
    else:
        vm.os = types.OperatingSystem(type=args.os)
    # args.memory and friends are given in MiB; the SDK expects bytes.
    vm.memory = args.memory * 1024 * 1024
    if args.balloon == 0:
        vm.memory_policy = types.MemoryPolicy(
            max=args.max_memory * 1024 * 1024,
            guaranteed=args.guaranteed_memory * 1024 * 1024)
    else:
        vm.memory_policy = types.MemoryPolicy(
            max=args.max_memory * 1024 * 1024,
            guaranteed=args.guaranteed_memory * 1024 * 1024,
            ballooning=True if args.balloon == 1 else False)
    vm.cpu = types.Cpu()
    vm.cpu.architecture = types.Architecture.X86_64
    vm.cpu.topology = types.CpuTopology(cores=1, sockets=args.cpu, threads=1)
    if args.sound != 0:
        vm.soundcard_enabled = True if args.sound == 1 else False
    vm.type = vmtype[args.type]
    print("Creating New Virtual Machine:{0}".format(args.name))
    vm = vms_service.add(vm)
    # Wait until the engine reports the new VM as DOWN (creation done).
    while vms_service.list(search=args.name)[0].status != types.VmStatus.DOWN:
        time.sleep(1)
    # Attach network interface(s)
    nics_service = vms_service.vm_service(vm.id).nics_service()
    nicnum = 0
    for netname in args.vmnet:
        # NOTE(review): 'network' is looked up but never used below.
        network = next(
            (n for n in networks_service.list() if n.name == netname), None)
        profile_id = None
        for profile in profiles_service.list():
            if profile.name == netname:
                profile_id = profile.id
                break
        if profile_id != None:
            nicnum = nicnum + 1
            print("Attaching nic{0}(Network:{1})".format(nicnum, netname))
            nics_service.add(
                types.Nic(
                    name="nic{0}".format(nicnum),
                    vnic_profile=types.VnicProfile(id=profile_id, ),
                ),
            )
    # Create and attach disk(s)
    disk_attachments_service = vms_service.vm_service(
        vm.id).disk_attachments_service()
    disks_service = connection.system_service().disks_service()
    disknum = 0
    # Each args.vmdisk entry is "<domain>:<size GiB>:<RAW|COW>".
    for d in args.vmdisk:
        disknum += 1
        new_disk = types.DiskAttachment()
        new_disk.disk = types.Disk()
        new_disk.disk.name = "{0}_Disk{1}".format(args.name, disknum)
        new_disk.disk.provisioned_size = int(d.split(':')[1]) * 2**30
        new_disk.disk.storage_domains = [
            types.StorageDomain(name=d.split(':')[0])
        ]
        if d.split(':')[2] == "RAW":
            new_disk.disk.format = types.DiskFormat.RAW
        else:
            new_disk.disk.format = types.DiskFormat.COW
        new_disk.interface = types.DiskInterface.VIRTIO_SCSI
        new_disk.active = True
        # Only the first disk is marked bootable.
        if disknum == 1:
            new_disk.bootable = True
        print(
            "Attaching Disk{0}(Domain:{1}, Size:{2}GB, DiskFormat:{3})".format(
                disknum, d.split(':')[0], d.split(':')[1], d.split(':')[2]))
        disk_attachment = disk_attachments_service.add(new_disk)
        disk_service = disks_service.disk_service(disk_attachment.disk.id)
        # wait disk attach finish
        time.sleep(5)
        while disk_service.get().status != types.DiskStatus.OK:
            print("Waiting disk attach complete")
            time.sleep(5)
    if args.ks != None or args.ps != None or args.ai != None:
        # one-shot VM configuration for Kickstart/preseed
        one_vm = types.Vm()
        one_vm.os = types.OperatingSystem()
        one_vm.os.kernel = 'iso://' + args.kernel
        one_vm.os.initrd = 'iso://' + args.initrd
        one_vm.run_once = True
        one_vm.cdroms = list()
        one_vm.cdroms.append(types.Cdrom())
        one_vm.cdroms[0].file = types.File()
        one_vm.cdroms[0].file.id = args.iso
        # Build the DNS part of the kernel command line per OS family.
        if args.dns == None:
            args.dns = ""
        elif args.os == 'rhel6':
            args.dns = 'dns=' + args.dns
        else:
            args.dns = 'nameserver=' + args.dns
        if args.os == 'rhel6':
            # RHEL6 anaconda uses the legacy ksdevice/ip/netmask/gateway
            # boot arguments; rebuild them from the colon-separated
            # args.network description.
            ksdev = args.network.split(':')[5]
            ksip = args.network.split(':')[0]
            ksnm = calc_netmask(int(args.network.split(':')[3]))
            ksgw = args.network.split(':')[2]
            args.network = "ksdevice={0} ip={1} netmask={2} gateway={3}".format(
                ksdev, ksip, ksnm, ksgw)
        if args.ks != None:
            if args.os == 'rhel6':
                one_vm.os.cmdline = args.network + " " + args.dns + " ks=" + args.ks
            else:
                one_vm.os.cmdline = args.network + " " + args.dns + " inst.ks=" + args.ks
        if args.ps != None:
            one_vm.os.cmdline = "auto=true url=" + args.ps
        if args.ai != None:
            one_vm.os.cmdline = "autoinstall ds=nocloud-net;s=" + args.ai
        vm_service = vms_service.vm_service(vm.id)
        print("Starting automatic OS installation on {0}".format(args.name))
        # volatile=True makes the run-once configuration one-shot.
        vm_service.start(vm=one_vm, volatile=True)
    # Close the connection to the server:
    connection.close()
def deploy(self, vm_name, cluster, timeout=900, power_on=True, **kwargs):
    """Create a new VM from this template.

    Args:
        vm_name: name for the new VM
        cluster: name of the cluster to deploy into
        timeout: seconds to wait for the VM to reach STOPPED (default 900)
        power_on: start the VM after creation (default True)
        placement_policy_host (optional): host name for placement policy
        placement_policy_affinity (optional): affinity for placement policy
        cpu (optional): number of cpu cores
        sockets (optional): number of cpu sockets (defaults to 1 when cpu given)
        ram (optional): memory value, passed straight to the engine's
            ``memory`` field (bytes, per the engine API)
        storage_domain (optional): storage domain name for the VM's disks

    Returns:
        wrapanapi.systems.rhevm.RHEVMVirtualMachine
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s', self.name,
                      vm_name)
    create_kwargs = {
        'name': vm_name,
        'cluster': self.system.get_cluster(cluster),
        'template': self.raw,
    }
    clone = None
    domain_name = kwargs.get('storage_domain')
    if domain_name:
        # Targeting a storage domain other than the template's requires a
        # clone (the "clone" disk-allocation mode in the UI) and explicit
        # per-disk attachments pointing at the target domain.
        clone = True
        target_domain = self.system.get_storage_domain(domain_name)
        create_kwargs['disk_attachments'] = [
            types.DiskAttachment(
                disk=types.Disk(id=attachment.id,
                                format=types.DiskFormat.COW,
                                storage_domains=[target_domain]))
            for attachment in self.api.disk_attachments_service().list()
        ]
    # Placement policy only makes sense with both host and affinity supplied.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        create_kwargs['placement_policy'] = types.VmPlacementPolicy(
            hosts=[types.Host(name=kwargs['placement_policy_host'])],
            affinity=kwargs['placement_policy_affinity'])
    # A cpu count implies a topology; sockets defaults to 1 unless passed.
    cores = kwargs.get('cpu', None)
    if cores:
        create_kwargs['cpu'] = types.Cpu(
            topology=types.CpuTopology(cores=cores,
                                       sockets=kwargs.get('sockets', 1)))
    if 'ram' in kwargs:
        create_kwargs['memory'] = int(kwargs['ram'])  # in Bytes
    vms_service = self.system.api.system_service().vms_service()
    vms_service.add(types.Vm(**create_kwargs), clone=clone)
    new_vm = self.system.get_vm(vm_name)
    new_vm.wait_for_state(VmState.STOPPED, timeout=timeout)
    if power_on:
        new_vm.start()
    return new_vm
def attach_disk(self, data_vm):
    """Snapshot ``data_vm`` and attach the snapshot's disks to the agent VM.

    Creates a (memory-less) snapshot of the data VM, waits for it to complete,
    then attaches every disk of the snapshot to the agent virtual machine so
    backup software running there can read them.

    Args:
        data_vm: the data virtual machine (ovirtsdk4 ``types.Vm``) to back up.

    Side effects: stores ``attachments``, ``attachments_service``,
    ``snap_description`` and ``snap_service`` on ``self`` so a later detach /
    cleanup step can undo the work.
    """
    # Find the service that manages the data virtual machine:
    data_vm_service = self.vms_service.vm_service(data_vm.id)
    snap_description = '%s-bk-%s' % (data_vm.name, uuid.uuid4())
    self.event(
        data_vm, types.LogSeverity.NORMAL,
        'Backup of virtual machine \'{}\' using snapshot \'{}\' is '
        'starting.'.format(data_vm.name, snap_description))

    # Send the request to create the snapshot. Note that this will return
    # before the snapshot is completely created, so we will later need to
    # wait till the snapshot is completely created.
    # The snapshot will not include memory. Change to True the parameter
    # persist_memorystate to get it (in that case the VM will be paused for a while).
    snaps_service = data_vm_service.snapshots_service()
    snap = snaps_service.add(snapshot=types.Snapshot(
        description=snap_description,
        persist_memorystate=False,
    ), )
    logging.info(
        'Sent request to create snapshot \'%s\', the id is \'%s\'.',
        snap.description, snap.id,
    )

    # Poll and wait till the status of the snapshot is 'ok', which means
    # that it is completely created:
    snap_service = snaps_service.snapshot_service(snap.id)
    while snap.snapshot_status != types.SnapshotStatus.OK:
        time.sleep(1)
        snap = snap_service.get()
    logging.info('The snapshot is now complete.')
    # Extra settle time after the snapshot reports OK — kept from the
    # original; presumably gives the engine time to finish housekeeping.
    time.sleep(15)

    # Retrieve the descriptions of the disks of the snapshot:
    snap_disks_service = snap_service.disks_service()
    snap_disks = snap_disks_service.list()

    # Attach all the disks of the snapshot to the agent virtual machine, and
    # save the resulting disk attachments in a list so that we can later
    # detach them easily:
    attachments_service = self.agent_vm_service.disk_attachments_service()
    attachments = []
    for snap_disk in snap_disks:
        attachment = attachments_service.add(
            attachment=types.DiskAttachment(
                disk=types.Disk(
                    id=snap_disk.id,
                    snapshot=types.Snapshot(id=snap.id, ),
                ),
                active=True,
                bootable=False,
                interface=types.DiskInterface.VIRTIO,
            ),
        )
        attachments.append(attachment)
        logging.info(
            'Attached disk \'%s\' to the agent virtual machine.',
            attachment.disk.id,
        )

    # Now the disks are attached to the virtual agent virtual machine, we
    # can then ask that virtual machine to perform the backup. Doing that
    # requires a mechanism to talk to the backup software that runs inside the
    # agent virtual machine. That is outside of the scope of the SDK. But if
    # the guest agent is installed in the virtual machine then we can
    # provide useful information, like the identifiers of the disks that have
    # just been attached.
    for attachment in attachments:
        if attachment.logical_name is not None:
            logging.info(
                'Logical name for disk \'%s\' is \'%s\'.',
                attachment.disk.id,
                # BUG FIX: was `attachment.logicalname` (no such attribute),
                # which raised AttributeError precisely when a logical name
                # WAS available; the correct SDK attribute is `logical_name`.
                attachment.logical_name,
            )
        else:
            logging.info(
                'The logical name for disk \'%s\' isn\'t available. Is the '
                'guest agent installed?',
                attachment.disk.id,
            )

    # We need to sleep here because the system needs time to scan the drives
    time.sleep(self.config["attach_wait_seconds"])

    self.attachments = attachments
    self.attachments_service = attachments_service
    self.snap_description = snap_description
    self.snap_service = snap_service
def attach_disk(params, vm_name):
    """Attach a disk to the VM named ``vm_name``.

    Args:
        params: dict describing the disk. Always required: ``format``
            ("raw"/"cow"), ``interface`` ("virtio"), ``bootable``, ``active``.
            With truthy ``existing_disk``: ``id`` of the disk to attach.
            Otherwise (new disk): ``name``, ``provisioned_size`` (GB),
            ``storage_domains``; optional ``description``.
        vm_name: name of the target VM (first search match is used).

    Raises:
        SaltCloudExecutionFailure: when ``format`` or ``interface`` is
            unsupported.  (BUG FIX: the original `return`ed an error string,
            making its `raise` statements unreachable dead code.)
    """
    vms_service = connection.system_service().vms_service()
    vm = vms_service.list(search='name=' + str(vm_name))[0]
    disk_attachments_service = vms_service.vm_service(
        vm.id).disk_attachments_service()

    disk_fmt = params["format"].lower()
    if disk_fmt == "raw":
        disk_format = types.DiskFormat.RAW
    elif disk_fmt == "cow":
        disk_format = types.DiskFormat.COW
    else:
        raise SaltCloudExecutionFailure(
            "Cant determinate format of disk. Supported only RAW and COW")

    if params["interface"].lower() == "virtio":
        disk_interface = types.DiskInterface.VIRTIO
    else:
        # BUG FIX: original message was copy-pasted from the format branch
        # ("Supported only RAW and COW"), which is wrong for interfaces.
        raise SaltCloudExecutionFailure(
            "Cant determinate interface of disk. Supported only VIRTIO")

    # `== True` kept deliberately: the original attached an existing disk only
    # when existing_disk was literally True; any other/missing value creates a
    # new disk (the original duplicated that creation branch verbatim).
    if params.get("existing_disk") == True:  # noqa: E712
        disk_attachments_service.add(
            types.DiskAttachment(
                disk=types.Disk(id=params["id"], ),
                active=params["active"],
                interface=disk_interface,
                bootable=params["bootable"],
            ))
    else:
        disk_attachments_service.add(
            types.DiskAttachment(
                disk=types.Disk(
                    name=params["name"],
                    description=params.get("description", "Not provided"),
                    format=disk_format,
                    # provisioned_size is GB on input, bytes on the wire
                    provisioned_size=int(params["provisioned_size"]) * 2**30,
                    storage_domains=[
                        types.StorageDomain(name=params["storage_domains"], ),
                    ],
                ),
                interface=disk_interface,
                bootable=params["bootable"],
                active=params["active"],
            ))
disks_service = system_service.disks_service() print("Creating disk: %s" % disk_id) disk = create_disk(base_volume, disk_id, sd_name, disks_service) # Add VM from saved OVF file print("Creating VM from OVF: %s" % ovf_file_path) vmId = create_vm_from_ovf(ovf_file_path, vms_service) # Locate VM service vm_service = vms_service.vm_service(vmId) # Attach disk to vm vm_service.disk_attachments_service().add( types.DiskAttachment( disk=disk, interface=types.DiskInterface.VIRTIO, bootable=False, active=True, )) # We waited until OK for it when we created the disk. We wait 5s for security time.sleep(5) # Creating a snapshot for each image for image in images_chain[1:]: image_id = os.path.basename(image['filename']) print("Creating snapshot - Image: %s, Disk: %s" % (image_id, disk_id)) create_snapshot("description", image_id, disk_id, vm_service) # Uploading images for image in images_chain: image_path = image['filename']