def test_malformed_exception_str(self):
    bad_res = {}
    try:
        response.is_error(bad_res)
    except response.MalformedResponse as ex:
        self.assertEqual(str(ex), "Missing required key in {}")
    else:
        # without this the test would pass silently when no exception is
        # raised at all
        self.fail("MalformedResponse was not raised")
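# A minimal, self-contained sketch of the response-dict convention these
# snippets exercise. This is an illustrative assumption, not vdsm's actual
# implementation: every verb returns {'status': {'code': ..., 'message': ...}},
# optionally carrying extra keys such as 'result', and is_error() only
# inspects the status code. The code values used below are made up.
def _sketch_is_error(res, success_code=0):
    try:
        return res['status']['code'] != success_code
    except KeyError:
        raise ValueError('Missing required key in {!r}'.format(res))


assert not _sketch_is_error({'status': {'code': 0, 'message': 'Done'}})
assert _sketch_is_error({'status': {'code': 16, 'message': 'Fatal error'}})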
def _startUnderlyingMigration(self, startTime):
    if self.hibernating:
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        fname = self._vm.cif.prepareVolumePath(self._dst)
        try:
            self._vm._dom.save(fname)
        finally:
            self._vm.cif.teardownVolumePath(self._dst)
    else:
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(self._machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)

        if response.is_error(result):
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'

        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)

        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule,
                                            self._use_convergence_schedule)

        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)

        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _startUnderlyingMigration(self, startTime, migrationParams,
                              machineParams):
    if self.hibernating:
        self._started = True
        self._vm.hibernate(self._dst)
    else:
        self._vm.prepare_migration()

        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)

        if response.is_error(result):
            self.status = result
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])

        self._started = True

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'

        duri = 'qemu+{}://{}/system'.format(
            transport, normalize_literal_addr(self.remoteHost))

        dstqemu = migrationParams['dstqemu']
        if dstqemu:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(dstqemu))
        else:
            muri = 'tcp://{}'.format(
                normalize_literal_addr(self.remoteHost))
        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)

        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule,
                                            self._use_convergence_schedule)

        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)

        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _recover(self, message):
    if not response.is_error(self.status):
        self.status = response.error('migrateErr')
    self.log.error(message)
    if not self.hibernating and self._destServer is not None:
        if self._vm.post_copy == PostCopyPhase.RUNNING:
            # We can't recover a VM after a failed post-copy migration.
            # And the destination takes care of the situation itself.
            self._vm.handle_failed_post_copy(clean_vm=True)
            return
        try:
            self._destServer.destroy(self._vm.id)
        except Exception:
            self.log.exception("Failed to destroy remote VM")
    # if the guest was stopped before migration, we need to cont it
    if self.hibernating:
        self._vm.cont(ignoreStatus=True)
        if self._enableGuestEvents:
            self._vm.guestAgent.events.after_hibernation_failure()
    elif self._enableGuestEvents:
        self._vm.guestAgent.events.after_migration_failure()
    # either way, migration has finished
    if self._recovery:
        self._vm.set_last_status(vmstatus.UP, vmstatus.MIGRATION_SOURCE)
        self._recovery = False
    else:
        self._vm.lastStatus = vmstatus.UP
    self._started = False
    self._vm.send_status_event()
def test_interface_update(self):
    devices = [{'nicModel': 'virtio', 'network': 'ovirtmgmt',
                'macAddr': '52:54:00:59:F5:3F',
                'device': 'bridge', 'type': 'interface',
                'alias': 'net1', 'name': 'net1',
                'linkActive': 'true',
                'specParams': {'inbound': {'average': 1000, 'peak': 5000,
                                           'burst': 1024},
                               'outbound': {'average': 128, 'burst': 256}},
                }]
    params = {'linkActive': 'true', 'alias': 'net1',
              'deviceType': 'interface', 'network': 'ovirtmgmt2',
              'specParams': {'inbound': {}, 'outbound': {}}}
    updated_xml = '''
        <interface type="bridge">
          <mac address="52:54:00:59:F5:3F"/>
          <model type="virtio"/>
          <source bridge="ovirtmgmt2"/>
          <virtualport type="openvswitch"/>
          <link state="up"/>
          <bandwidth/>
        </interface>
    '''
    with fake.VM(devices=devices, create_device_objects=True) as testvm:
        testvm._dom = fake.Domain()
        res = testvm.updateDevice(params)
        self.assertFalse(response.is_error(res))
        self.assertXMLEqual(testvm._dom.devXml, updated_xml)
def _remove_scratch_disks(vm, backup_id):
    log.info("Removing scratch disks for backup id: %s", backup_id)

    res = vm.cif.irs.list_transient_disks(vm.id)
    if response.is_error(res):
        raise exception.BackupError(
            reason="Failed to fetch scratch disks: {}".format(res),
            vm_id=vm.id,
            backup_id=backup_id)

    for disk_name in res['result']:
        res = vm.cif.irs.remove_transient_disk(vm.id, disk_name)
        if response.is_error(res):
            log.error(
                "Failed to remove backup '%s' "
                "scratch disk for drive name: %s, ",
                backup_id, disk_name)
def test_migrate_from_status(self, vm_status):
    with MonkeyPatchScope([
        (migration, 'SourceThread', fake.MigrationSourceThread)
    ]):
        with fake.VM(status=vm_status, cif=self.cif) as testvm:
            res = testvm.migrate({})  # no params needed
            self.assertFalse(response.is_error(res))
def test_create_with_missing_boot_disk(self):
    res = self.vm.create({
        'vmId': self.uuid,
        'memSize': 0,
        'boot': 'c',
    })
    self.assertTrue(response.is_error(res, 'MissParam'))
def test_no_callbacks(self):
    vm = FakeVM(self.dom, FakeGuestAgent(responsive=False),
                acpiEnable='false')
    obj = make_object('VmPowerDown', vm, self.event)
    res = obj.start()
    self.assertTrue(response.is_error(res, 'exist'))
def _recover(self, message):
    if not response.is_error(self.status):
        self.status = response.error('migrateErr')
    self.log.error(message)
    if not self.hibernating and self._destServer is not None:
        if self._vm.post_copy == PostCopyPhase.RUNNING:
            # We can't recover a VM after a failed post-copy migration.
            # And the destination takes care of the situation itself.
            self._vm.handle_failed_post_copy(clean_vm=True)
            return
        try:
            self._destServer.destroy(self._vm.id)
        except Exception:
            self.log.exception("Failed to destroy remote VM")
    # if the guest was stopped before migration, we need to cont it
    if self.hibernating:
        self._vm.cont(ignoreStatus=True)
        if self._enableGuestEvents:
            self._vm.guestAgent.events.after_hibernation_failure()
    elif self._enableGuestEvents:
        self._vm.guestAgent.events.after_migration_failure()
    # either way, migration has finished
    self._failed = True
    if self._recovery:
        self._vm.set_last_status(vmstatus.UP, vmstatus.MIGRATION_SOURCE)
        self._recovery = False
    else:
        self._vm.lastStatus = vmstatus.UP
    self._started = False
    self._vm.send_status_event()
def _prepare_device(storage, device):
    lease = dict(sd_id=device["sd_id"], lease_id=device["lease_id"])
    res = storage.lease_info(lease)
    if response.is_error(res):
        raise CannotPrepare(device, res["status"]["message"])
    lease_info = res["result"]
    device["path"] = lease_info["path"]
    device["offset"] = lease_info["offset"]
def qemuGuestAgentCallback(self):
    # TODO: QEMU GA does not support setting a delay for reboot right
    # now, but it may gain this functionality in the future. When the
    # feature is implemented, the delay should also be passed here.
    if response.is_error(self.vm.qemuGuestAgentReboot()):
        return False
    return self.event.wait(self.timeout)
def test_create_fix_param_vmName(self):
    vmParams = {
        'vmId': self.uuid,
        'memSize': 8 * 1024,
    }
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertEqual(vmParams.get('vmName'), 'n%s' % self.uuid)
def test_with_default_callbacks(self):
    vm = FakeVM(self.dom, FakeGuestAgent(responsive=True),
                acpiEnable='true')
    obj = make_object('VmPowerDown', vm, self.event)
    # no actual callback will be called now!
    res = obj.start()
    self.assertFalse(response.is_error(res))
def testGetConvertedVMErrorFlow(self, exc):
    def _raise_error(*args, **kwargs):
        raise exc()

    # we monkeypatch the very first utility function called
    with MonkeyPatchScope([(v2v, '_get_job', _raise_error)]):
        # we use uuid to fill the API contract, but it is unused
        res = v2v.get_converted_vm(str(uuid.uuid4()))
        self.assertTrue(response.is_error(res))
def test_no_callbacks(self):
    vm = FakeVM(
        self.dom, FakeGuestAgent(responsive=False), acpiEnable='false'
    )
    obj = make_powerdown(vm, self.event)
    res = obj.start()
    self.assertTrue(response.is_error(res, 'exist'))
def test_hibernation_params_map_memory_dump(self):
    vmParams = {'hiberVolHandle': self._hibernation_volume_old_format}
    vmParams.update(self.vmParams)
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    expected_memory_dump = {'device': 'disk', 'domainID': '0',
                            'poolID': '1', 'imageID': '2',
                            'volumeID': '3'}
    self.assertEqual(expected_memory_dump, vmParams['restoreState'])
def test_migrate_from_status(self, vm_status, pause_code):
    with MonkeyPatchScope([(migration, 'SourceThread',
                            fake.MigrationSourceThread)]):
        with fake.VM(cif=self.cif, status=vm_status,
                     runCpu=(vm_status == vmstatus.UP),
                     pause_code=pause_code) as testvm:
            res = testvm.migrate({})  # no params needed
            assert not response.is_error(res)
def test_create_unsupported_graphics(self):
    vmParams = {
        'vmId': self.uuid,
        'memSize': 8 * 1024,
        'vmType': 'kvm',
        'display': 'unsupported',
    }
    res = self.vm.create(vmParams)
    self.assertTrue(response.is_error(res, 'createErr'))
def test_create_fix_param_kvmEnable(self):
    vmParams = {
        'vmId': self.uuid,
        'memSize': 8 * 1024,
        'vmType': 'kvm',
    }
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertTrue(conv.tobool(vmParams.get('kvmEnable')))
def acpiCallback(self):
    self.vm.log.debug("Rebooting with ACPI")
    if response.is_error(self.vm.acpiReboot()):
        self.vm.log.warn("Rebooting with ACPI FAILED")
        return False
    return utils.log_success(self.event.wait(self.timeout),
                             self.vm.log,
                             "Rebooting with ACPI succeeded",
                             "Rebooting with ACPI timed out")
def createVm(self, vmParams, vmRecover=False):
    with self.vmContainerLock:
        if not vmRecover:
            if vmParams['vmId'] in self.vmContainer:
                return errCode['exist']
        vm = Vm(self, vmParams, vmRecover)
        ret = vm.run()
        if not response.is_error(ret):
            self.vmContainer[vm.id] = vm
        return ret
def createVm(self, vmParams, vmRecover=False):
    with self.vmContainerLock:
        if not vmRecover:
            if vmParams['vmId'] in self.vmContainer:
                return errCode['exist']
        vm = Vm(self, vmParams, vmRecover)
        ret = vm.run()
        if not response.is_error(ret):
            self.vmContainer[vmParams['vmId']] = vm
        return ret
def test_with_default_callbacks(self):
    vm = FakeVM(
        self.dom, FakeGuestAgent(responsive=True), acpiEnable='true'
    )
    obj = make_powerdown(vm, self.event)
    # no actual callback will be called now!
    res = obj.start()
    self.assertFalse(response.is_error(res))
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if not backup_cfg.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=backup_cfg.backup_id)

    drives = _get_disks_drives(vm, backup_cfg)

    path = socket_path(backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(path)

    # Create scratch disk for each drive
    _create_scratch_disks(vm, dom, backup_cfg.backup_id, drives)

    try:
        res = vm.freeze()
        if response.is_error(res) and backup_cfg.require_consistency:
            raise exception.BackupError(
                reason="Failed freeze VM: {}".format(
                    res["status"]["message"]),
                vm_id=vm.id,
                backup=backup_cfg)

        backup_xml = create_backup_xml(
            nbd_addr, drives, backup_cfg.from_checkpoint_id)
        checkpoint_xml = create_checkpoint_xml(backup_cfg, drives)

        vm.log.info(
            "Starting backup for backup_id: %r, "
            "backup xml: %s\ncheckpoint xml: %s",
            backup_cfg.backup_id, backup_xml, checkpoint_xml)

        _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml)
    except:
        # remove all the created scratch disks
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise
    finally:
        # Must always thaw, even if freeze failed; in case the guest
        # did freeze the filesystems, but failed to reply in time.
        # Libvirt is using same logic (see src/qemu/qemu_driver.c).
        vm.thaw()

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)
    }

    result = {'disks': disks_urls}

    if backup_cfg.to_checkpoint_id is not None:
        _add_checkpoint_xml(vm, dom, backup_cfg.backup_id,
                            backup_cfg.to_checkpoint_id, result)

    return dict(result=result)
def test_hibernation_params_requested_but_missing(self):
    vmParams = {'hiberVolHandle': '/this/path/does/not/exist/'}
    vmParams.update(self.vmParams)
    refParams = copy.deepcopy(vmParams)
    del refParams['hiberVolHandle']  # must go away
    refParams['restoreState'] = True  # to be added BY TESTS
    res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertEqual(refParams, vmParams)
def test_create_twice(self):
    vmParams = {
        'vmId': self.uuid,
    }
    vm = FakeVM(self.cif, vmParams)
    self.cif.vmContainer[vm.id] = vm
    try:
        res = self.vm.create({})
        self.assertTrue(response.is_error(res, 'exist'))
    finally:
        del self.cif.vmContainer[vm.id]
        self.assertEqual(self.cif.vmContainer, {})
def test_nic_hotunplug_timeout(self):
    vm = self.vm
    self.test_nic_hotplug()
    self.assertEqual(len(vm._devices[hwclass.NIC]), 2)
    params = {'xml': self.NIC_HOTPLUG}
    with MonkeyPatchScope([
        (vdsm.common.supervdsm, 'getProxy', self.supervdsm.getProxy),
        (vdsm.virt.vm, 'config',
         make_config([('vars', 'hotunplug_timeout', '0'),
                      ('vars', 'hotunplug_check_interval', '0.01')])),
    ]):
        self.vm._dom.vm = None
        self.assertTrue(response.is_error(vm.hotunplugNic(params)))
        self.assertEqual(len(vm._devices[hwclass.NIC]), 2)
def load(self, cif):
    self._log.debug("recovery: trying with VM %s", self._vmid)
    try:
        with open(self._path) as src:
            params = pickle.load(src)
        self._set_elapsed_time(params)
        res = cif.createVm(params, vmRecover=True)
    except Exception:
        self._log.exception("Error recovering VM: %s", self._vmid)
        return False
    else:
        if response.is_error(res):
            return False
        return True
def refresh_destination_disk(self, vol_pdiv):
    """
    Refresh drive on the destination host.
    """
    if self._supports_disk_refresh is None:
        caps = self._destServer.getVdsCapabilities()
        if response.is_error(caps):
            self.log.warning(
                "Failed to get destination host capabilities: %s",
                caps["status"]["message"])
            self._supports_disk_refresh = False
        else:
            self._supports_disk_refresh = caps.get(
                "refresh_disk_supported", False)

    if not self._supports_disk_refresh:
        raise exception.DiskRefreshNotSupported()

    result = self._destServer.refresh_disk(self._vm.id, vol_pdiv)
    if response.is_error(result):
        raise exception.CannotRefreshDisk(
            reason=result["status"]["message"])
    return VolumeSize(int(result["apparentsize"]),
                      int(result["truesize"]))
def _create_transient_disk(vm, dom, backup_id, drive):
    disk_name = "{}.{}".format(backup_id, drive.name)
    drive_size = _get_drive_capacity(dom, drive)

    res = vm.cif.irs.create_transient_disk(owner_name=vm.id,
                                           disk_name=disk_name,
                                           size=drive_size)
    if response.is_error(res):
        raise exception.BackupError(
            reason='Failed to create transient disk: {}'.format(res),
            vm_id=vm.id,
            backup_id=backup_id,
            drive_name=drive.name)

    return res['result']['path']
def test_hibernation_params(self):
    vmParams = {}
    vmParams.update(self.vmParams)
    extraParams = {
        'a': 42,
        'foo': ['bar'],
    }
    with temporaryPath(data=pickle.dumps(extraParams)) as path:
        vmParams['hiberVolHandle'] = path
        res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    for param in extraParams:
        self.assertEqual(extraParams[param], vmParams[param])
def test_hibernation_params_wrong_format(self):
    vmParams = {}
    vmParams.update(self.vmParams)
    refParams = copy.deepcopy(vmParams)
    refParams['restoreState'] = True  # to be added BY TESTS
    extraParams = ['a', 42]
    with temporaryPath(data=pickle.dumps(extraParams)) as path:
        vmParams['hiberVolHandle'] = path
        res = self.vm.create(vmParams)
    self.assertFalse(response.is_error(res))
    self.assertEqual(refParams, vmParams)
def test_delayed_nic_hotunplug(self):
    vm = self.vm
    self.test_nic_hotplug()
    assert len(vm._devices[hwclass.NIC]) == 2
    params = {'xml': self.NIC_HOTPLUG}
    with MonkeyPatchScope([
        (vdsm.common.supervdsm, 'getProxy', self.supervdsm.getProxy),
        (vdsm.virt.vm, 'config',
         make_config([('vars', 'hotunplug_timeout', '0'),
                      ('vars', 'hotunplug_check_interval', '0.01')])),
    ]):
        self.vm._dom.vm = None
        assert response.is_error(vm.hotunplugNic(params))
    self.vm.onDeviceRemoved('ua-nic-hotplugged')
    assert len(vm._devices[hwclass.NIC]) == 1
def _recover_domain(cif, vm_id, dom_xml, external):
    external_str = " (external)" if external else ""
    cif.log.debug("recovery: trying with VM%s %s", external_str, vm_id)
    try:
        res = cif.createVm(_recovery_params(vm_id, dom_xml, external),
                           vmRecover=True)
    except Exception:
        cif.log.exception("Error recovering VM%s: %s", external_str, vm_id)
        return False
    if response.is_error(res):
        cif.log.info("Failed to recover VM%s: %s (%s)",
                     external_str, vm_id, res)
        return False
    cif.log.info("VM recovered: %s", vm_id)
    return True
def test_create_without_memsize(self):
    res = self.vm.create({'vmId': self.uuid})
    self.assertTrue(response.is_error(res, 'MissParam'))
def test_create_with_zero_memsize(self):
    res = self.vm.create({
        'vmId': self.uuid,
        'memSize': 0,
    })
    self.assertTrue(response.is_error(res, 'MissParam'))
def test_create_with_invalid_id(self):
    # anything which doesn't look like an UUID
    res = self.vm.create({'vmId': 'foobar'})
    self.assertTrue(response.is_error(res, 'MissParam'))
def test_create_without_id(self):
    res = self.vm.create({})
    self.assertTrue(response.is_error(res, 'MissParam'))
def testAcpiRebootConnected(self):
    with fake.VM() as testvm:
        testvm._dom = fake.Domain(vmId='testvm')
        self.assertFalse(response.is_error(testvm.acpiReboot()))
def acpiCallback(self):
    if response.is_error(self.vm.acpiShutdown()):
        return False
    return self.event.wait(self.timeout)
def testAcpiRebootDisconnected(self):
    with fake.VM() as testvm:
        testvm._dom = virdomain.Disconnected(vmid='testvm')
        self.assertTrue(response.is_error(testvm.acpiReboot()))
def test_success_with_return_dict_override_message(self):
    message = "this message overrides the default"
    res = self.vm.succeed_with_return({"message": message})
    self.assertEqual(response.is_error(res), False)
    self.assertEqual(res["status"]["message"], message)