def _vm_apply_new_disk_size(
    self,
    vm: VM,
    offline: bool,
    offline_transport: str,
    transaction: Transaction,
    disk_size: int = 0,
):
    """
    If the new VM disk size is set, check that it is correct and sufficient
    and commit the new size. Roll it back if the migration is interrupted.

    :param VM vm: The migrating VM
    :param bool offline: whether the migration is offline
    :param str offline_transport: offline migration transport
    :param Transaction transaction: The transaction to roll back
    :param int disk_size: the new disk_size_gib attribute
    """
    size = self.vm_new_disk_size(vm, offline, offline_transport, disk_size)
    if size == vm.dataset_obj['disk_size_gib']:
        return

    old_size = vm.dataset_obj['disk_size_gib']
    vm.dataset_obj['disk_size_gib'] = size
    vm.dataset_obj.commit()

    if transaction:
        def restore_size():
            vm.dataset_obj['disk_size_gib'] = old_size
            vm.dataset_obj.commit()

        transaction.on_rollback('reset_disk_size', restore_size)
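# The rollback hook above relies on a Transaction object collecting callbacks.
# The following is a minimal, self-contained sketch of that pattern with a toy
# transaction class and a plain dict standing in for vm.dataset_obj; it is
# illustrative only and not igvm's actual Transaction implementation.
class _SketchTransaction:
    """Toy transaction collecting rollback callbacks (illustrative only)."""

    def __init__(self):
        self._callbacks = []

    def on_rollback(self, name, callback):
        self._callbacks.append((name, callback))

    def rollback(self):
        # Undo the recorded steps in reverse order.
        for _name, callback in reversed(self._callbacks):
            callback()


dataset_obj = {'disk_size_gib': 10}   # stand-in for vm.dataset_obj
transaction = _SketchTransaction()

old_size = dataset_obj['disk_size_gib']
dataset_obj['disk_size_gib'] = 20     # "commit" the new size

def restore_size():
    dataset_obj['disk_size_gib'] = old_size

transaction.on_rollback('reset_disk_size', restore_size)
transaction.rollback()                # an interrupted migration triggers this
assert dataset_obj['disk_size_gib'] == 10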
def check_migrate_parameters(
    self,
    vm: VM,
    offline: bool,
    offline_transport: str,
    disk_size: int = None,
):
    if offline_transport not in ['netcat', 'drbd', 'xfs']:
        raise StorageError(
            'Unknown offline transport method {}!'
            .format(offline_transport)
        )

    if disk_size is None:
        return

    if disk_size < 1:
        raise StorageError('disk_size must be at least 1GiB!')

    if not (offline and offline_transport == 'xfs'):
        raise StorageError(
            'disk_size can be applied only with offline transport xfs!'
        )

    allocated_space = vm.dataset_obj['disk_size_gib'] - vm.disk_free()
    if disk_size < allocated_space:
        raise StorageError(
            'disk_size is lower than allocated space: {} < {}!'
            .format(disk_size, allocated_space)
        )
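# Worked sketch of the allocated-space check above, with hypothetical numbers:
# a VM with 10 GiB provisioned and 3 GiB free has 7 GiB allocated, so a
# requested disk_size of 5 GiB must be rejected while 8 GiB would pass. The
# helper below is illustrative only, not part of igvm.
def _sketch_allocated_space_check(requested_disk_size_gib):
    disk_size_gib = 10   # assumed vm.dataset_obj['disk_size_gib']
    disk_free_gib = 3    # assumed return value of vm.disk_free()
    allocated_space = disk_size_gib - disk_free_gib
    return requested_disk_size_gib >= allocated_space


assert _sketch_allocated_space_check(8) is True
assert _sketch_allocated_space_check(5) is False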
def vm_define(vm_hostname):
    """Define VM on hypervisor

    This command executes just the code necessary to define the VM, i.e.
    create the domain.xml for libvirt. It is a convenience command to
    restore a domain in case you lost your SSH session while the domain
    was not defined.

    :param vm_hostname: hostname of the VM
    """

    vm_dataset_obj = Query({'hostname': vm_hostname}, VM_ATTRIBUTES).get()
    hv = Hypervisor(vm_dataset_obj['hypervisor'])
    vm = VM(vm_dataset_obj, hv)

    hv.define_vm(vm)
    vm.start()

    log.info('VM {} defined and booted on {}'.format(
        vm_hostname, vm_dataset_obj['hypervisor']['hostname']))
def test_vm_define(self):
    vm_dataset_obj = Query({'hostname': VM_HOSTNAME}, VM_ATTRIBUTES).get()
    hv = Hypervisor(vm_dataset_obj['hypervisor'])
    vm = VM(vm_dataset_obj, hv)

    vm_stop(VM_HOSTNAME)
    hv.undefine_vm(vm, keep_storage=True)

    self.check_vm_absent()
    vm_define(VM_HOSTNAME)
    self.check_vm_present()
def _get_vm(hostname, unlock=True, allow_retired=False):
    """Get a server from Serveradmin by hostname and return a VM object

    The function accepts hostnames of any length as long as they resolve
    to a single server on Serveradmin.
    """

    object_id = Query({
        'hostname': Any(hostname, StartsWith(hostname + '.')),
        'servertype': 'vm',
    }, ['object_id']).get()['object_id']

    def vm_query():
        return Query({
            'object_id': object_id,
        }, VM_ATTRIBUTES).get()

    dataset_obj = vm_query()

    hypervisor = None
    if dataset_obj['hypervisor']:
        hypervisor = Hypervisor(dataset_obj['hypervisor'])

        # XXX: Ugly hack until adminapi supports modifying joined objects
        dict.__setitem__(
            dataset_obj, 'hypervisor', dataset_obj['hypervisor']['hostname']
        )

    vm = VM(dataset_obj, hypervisor)
    vm.acquire_lock()

    try:
        if not allow_retired and dataset_obj['state'] == 'retired':
            raise InvalidStateError(
                'VM {} is in state retired, I refuse to work on it!'.format(
                    hostname,
                )
            )
        yield vm
    except (Exception, KeyboardInterrupt):
        VM(vm_query(), hypervisor).release_lock()
        raise
    else:
        # We re-fetch the VM because we can't risk committing any changes
        # to the VM other than unlocking. There can be changes left over
        # from failed operations, like setting memory.
        # Most operations require unlocking; the only exception is deleting
        # a VM. After the object is deleted, it can't be unlocked.
        if unlock:
            VM(vm_query(), hypervisor).release_lock()
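# _get_vm yields, so in igvm it is used as a context manager. The sketch below
# reproduces only the lock-handling control flow with a toy lock, because the
# Serveradmin objects cannot be constructed standalone; all names here are
# illustrative, not igvm's real API.
from contextlib import contextmanager


class _SketchLock:
    def acquire(self):
        print('lock acquired')

    def release(self):
        print('lock released')


@contextmanager
def _sketch_locked_vm(unlock=True):
    lock = _SketchLock()
    lock.acquire()
    try:
        yield lock
    except (Exception, KeyboardInterrupt):
        # Release on failure and re-raise, mirroring _get_vm.
        lock.release()
        raise
    else:
        # Release on success only when requested; after deleting a VM the
        # object no longer exists and cannot be unlocked.
        if unlock:
            lock.release()


with _sketch_locked_vm() as lock:
    pass  # work on the locked VM here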
def estimate_cpu_cores_used(self, vm: VM) -> float:
    """Estimate the number of CPU cores used by the VM

    Estimate the number of CPU cores the VM uses on the hypervisor based
    on the data of the past 24 hours, by dividing the VM performance value
    by the hypervisor's cpu_perffactor.

    :param vm: VM object

    :return: number of CPU cores used on the hypervisor
    """

    vm_performance_value = vm.performance_value()

    # Serveradmin cannot handle floats right now, so the factor is stored
    # multiplied by 1000 and divided by 1000 again here.
    hv_cpu_perffactor = self.dataset_obj['cpu_perffactor'] / 1000
    cpu_cores_used = vm_performance_value / hv_cpu_perffactor

    return float(cpu_cores_used)
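# Worked example of the cpu_perffactor scaling above, with hypothetical
# numbers: Serveradmin stores the factor multiplied by 1000, so a stored
# value of 1300 means a factor of 1.3. With a VM performance value of 5.2,
# the estimate is 5.2 / 1.3 = 4.0 cores on this hypervisor.
vm_performance_value = 5.2        # assumed 24h performance value of the VM
stored_cpu_perffactor = 1300      # assumed Serveradmin cpu_perffactor value
cpu_cores_used = vm_performance_value / (stored_cpu_perffactor / 1000)
assert abs(cpu_cores_used - 4.0) < 1e-9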
def _wait_for_shutdown(
    self,
    vm: VM,
    no_shutdown: bool,
    transaction: Transaction,
):
    """
    If no_shutdown is True, wait for the VM to be shut down manually.
    Otherwise shut the VM down.

    :param VM vm: The migrating VM
    :param bool no_shutdown: whether the VM must be shut down manually
    :param Transaction transaction: The transaction to roll back
    """
    vm.set_state('maintenance', transaction=transaction)

    if vm.is_running():
        if no_shutdown:
            log.info('Please shut down the VM manually now')
            vm.wait_for_running(running=False, timeout=86400)
        else:
            vm.shutdown(
                check_vm_up_on_transaction=False,
                transaction=transaction,
            )
def migrate_vm(
    self,
    vm: VM,
    target_hypervisor: 'Hypervisor',
    offline: bool,
    offline_transport: str,
    transaction: Transaction,
    no_shutdown: bool,
    disk_size: int = 0,
):
    self._vm_apply_new_disk_size(
        vm, offline, offline_transport, transaction, disk_size
    )

    if offline:
        log.info(
            'Starting offline migration of vm {} from {} to {}'.format(
                vm, vm.hypervisor, target_hypervisor)
        )

        target_hypervisor.create_vm_storage(vm, transaction)
        if offline_transport == 'drbd':
            is_lvm_storage = (
                self.get_storage_type() == 'logical'
                and target_hypervisor.get_storage_type() == 'logical'
            )
            if not is_lvm_storage:
                raise NotImplementedError(
                    'DRBD migration is supported only between hypervisors '
                    'using LVM storage!'
                )

            host_drbd = DRBD(self, vm, master_role=True)
            peer_drbd = DRBD(target_hypervisor, vm)

            if vm.hypervisor.vm_running(vm):
                vm_block_size = vm.get_block_size('/dev/vda')
                src_block_size = vm.hypervisor.get_block_size(
                    vm.hypervisor.get_volume_by_vm(vm).path()
                )
                dst_block_size = target_hypervisor.get_block_size(
                    target_hypervisor.get_volume_by_vm(vm).path()
                )
                log.debug(
                    'Block sizes: VM {}, Source HV {}, Destination HV {}'
                    .format(vm_block_size, src_block_size, dst_block_size)
                )
                vm.set_block_size('vda', min(
                    vm_block_size, src_block_size, dst_block_size,
                ))

            with host_drbd.start(peer_drbd), peer_drbd.start(host_drbd):
                # XXX: Do we really need to wait for both?
                host_drbd.wait_for_sync()
                peer_drbd.wait_for_sync()
                self._wait_for_shutdown(vm, no_shutdown, transaction)
        elif offline_transport == 'netcat':
            self._wait_for_shutdown(vm, no_shutdown, transaction)
            vm_disk_path = target_hypervisor.get_volume_by_vm(vm).path()
            with target_hypervisor.netcat_to_device(vm_disk_path) as args:
                self.device_to_netcat(
                    self.get_volume_by_vm(vm).path(),
                    vm.dataset_obj['disk_size_gib'] * 1024 ** 3,
                    args,
                )
        elif offline_transport == 'xfs':
            self._wait_for_shutdown(vm, no_shutdown, transaction)
            with target_hypervisor.xfsrestore(vm, transaction) as listener:
                self.xfsdump(vm, listener, transaction)
            target_hypervisor.wait_for_xfsrestore(vm)
            target_hypervisor.check_xfsrestore_log(vm)
            target_hypervisor.umount_vm_storage(vm)

        target_hypervisor.define_vm(vm, transaction)
    else:
        # For online migrations always use the same volume name the VM
        # already has.
        target_hypervisor.create_vm_storage(
            vm, transaction,
            vm.hypervisor.get_volume_by_vm(vm).name(),
        )
        migrate_live(self, target_hypervisor, vm, self._get_domain(vm))
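# Worked example of the DRBD block-size negotiation above, with hypothetical
# values: the VM device is set to the smallest of the three block sizes so
# that replication between the hypervisors stays consistent.
vm_block_size = 4096     # assumed block size reported inside the VM
src_block_size = 512     # assumed block size of the source HV volume
dst_block_size = 4096    # assumed block size of the destination HV volume
negotiated = min(vm_block_size, src_block_size, dst_block_size)
assert negotiated == 512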