def test_malformed_exception_str(self):
    """is_error() on a response missing 'status' raises with a clear message."""
    bad_res = {}
    # assertRaises makes the test fail if no exception is raised; the
    # original try/except pattern passed vacuously in that case.
    with self.assertRaises(response.MalformedResponse) as ctx:
        response.is_error(bad_res)
    self.assertEqual(str(ctx.exception), "Missing required key in {}")
def _startUnderlyingMigration(self, startTime):
    """Perform the actual VM migration (or hibernation save).

    On the hibernation path the domain state is saved to a prepared
    volume; otherwise the VM is created on the destination server and
    the live migration is started and monitored.

    :param startTime: epoch timestamp of when migration began; adjusted
        here so destination-VM creation time is excluded from the
        measured migration duration.
    :raises MigrationLimitExceeded: destination refused due to its
        incoming-migration limit.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        # Hibernation: save domain state to a volume, always tearing the
        # volume path down afterwards, even if save() fails.
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        fname = self._vm.cif.prepareVolumePath(self._dst)
        try:
            self._vm._dom.save(fname)
        finally:
            self._vm.cif.teardownVolumePath(self._dst)
    else:
        # Give per-device and per-VM hooks a chance to run before the
        # source side starts migrating.
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(
                dev._deviceXML, self._vm.conf, dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(self._machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)

        if response.is_error(result):
            self.status = result
            # 'migrateLimit' is reported separately so callers can retry
            # later rather than treat it as a hard setup failure.
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'

        # duri: libvirt connection URI; muri: migration data URI.
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost

        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)

        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule,
                                            self._use_convergence_schedule)

        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)

        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def _startUnderlyingMigration(self, startTime):
    """Perform the actual VM migration (or hibernation save).

    On the hibernation path the domain state is saved to a prepared
    volume; otherwise the VM is created on the destination server and
    the live migration is started and monitored.

    :param startTime: epoch timestamp of when migration began; adjusted
        here so destination-VM creation time is excluded from the
        measured migration duration.
    :raises MigrationLimitExceeded: destination refused due to its
        incoming-migration limit.
    :raises MigrationDestinationSetupError: any other destination error.
    """
    if self.hibernating:
        # Hibernation: save domain state to a volume, always tearing the
        # volume path down afterwards, even if save() fails.
        hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
        fname = self._vm.cif.prepareVolumePath(self._dst)
        try:
            self._vm._dom.save(fname)
        finally:
            self._vm.cif.teardownVolumePath(self._dst)
    else:
        # Give per-device and per-VM hooks a chance to run before the
        # source side starts migrating.
        for dev in self._vm._customDevices():
            hooks.before_device_migrate_source(dev._deviceXML,
                                               self._vm.conf,
                                               dev.custom)
        hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                       self._vm.conf)

        # Do not measure the time spent for creating the VM on the
        # destination. In some cases some expensive operations can cause
        # the migration to get cancelled right after the transfer started.
        destCreateStartTime = time.time()
        result = self._destServer.migrationCreate(self._machineParams,
                                                  self._incomingLimit)
        destCreationTime = time.time() - destCreateStartTime
        startTime += destCreationTime
        self.log.info('Creation of destination VM took: %d seconds',
                      destCreationTime)

        if response.is_error(result):
            self.status = result
            # 'migrateLimit' is reported separately so callers can retry
            # later rather than treat it as a hard setup failure.
            if response.is_error(result, 'migrateLimit'):
                raise MigrationLimitExceeded()
            else:
                raise MigrationDestinationSetupError(
                    'migration destination error: ' +
                    result['status']['message'])

        if config.getboolean('vars', 'ssl'):
            transport = 'tls'
        else:
            transport = 'tcp'

        # duri: libvirt connection URI; muri: migration data URI.
        duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)

        if self._vm.conf['_migrationParams']['dstqemu']:
            muri = 'tcp://%s' % \
                self._vm.conf['_migrationParams']['dstqemu']
        else:
            muri = 'tcp://%s' % self.remoteHost

        self._vm.log.info('starting migration to %s '
                          'with miguri %s', duri, muri)

        self._monitorThread = MonitorThread(self._vm, startTime,
                                            self._convergence_schedule,
                                            self._use_convergence_schedule)

        if self._use_convergence_schedule:
            self._perform_with_conv_schedule(duri, muri)
        else:
            self._perform_with_downtime_thread(duri, muri)

        self.log.info("migration took %d seconds to complete",
                      (time.time() - startTime) + destCreationTime)
def test_migrate_from_status(self, vm_status, is_error, error_code):
    """Migrating from vm_status must (not) fail with the given error code."""
    with MonkeyPatchScope([(migration, 'SourceThread',
                            fake.MigrationSourceThread)]):
        with fake.VM(status=vm_status, cif=self.cif) as testvm:
            res = testvm.migrate({})  # no params needed
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual(
                response.is_error(res, error_code),
                is_error,
            )
def createVm(self, vmParams, vmRecover=False):
    """Create and run a VM from vmParams; register it on success.

    Returns the result of Vm.run(), or errCode['exist'] if a VM with
    the same id is already registered (unless recovering).
    """
    with self.vmContainerLock:
        # The duplicate-id check is skipped during recovery: the VM is
        # expected to already exist on the host in that case.
        if not vmRecover and vmParams['vmId'] in self.vmContainer:
            return errCode['exist']
        newVm = Vm(self, vmParams, vmRecover)
        result = newVm.run()
        if not response.is_error(result):
            self.vmContainer[vmParams['vmId']] = newVm
        return result
def test_migrate_from_status(self, vm_status, is_error, error_code):
    """Migrating from vm_status must (not) fail with the given error code."""
    with MonkeyPatchScope([
        (migration, 'SourceThread', fake.MigrationSourceThread)
    ]):
        with fake.VM(status=vm_status, cif=self.cif) as testvm:
            res = testvm.migrate({})  # no params needed
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual(
                response.is_error(res, error_code),
                is_error,
            )
def load(self, cif):
    """Recover a single VM from its pickled recovery file.

    :param cif: clientIF-like object used to re-create the VM.
    :return: True if the VM was recovered successfully, False otherwise.
    """
    self._log.debug("recovery: trying with VM %s", self._vmid)
    try:
        # Pickle streams are binary; open in 'rb' so loading works
        # correctly (text mode corrupts the pickle data on Python 3).
        with open(self._path, 'rb') as src:
            params = pickle.load(src)
        self._set_elapsed_time(params)
        res = cif.createVm(params, vmRecover=True)
    except Exception:
        self._log.exception("Error recovering VM: %s", self._vmid)
        return False
    else:
        if response.is_error(res):
            return False
        return True
def _vm_from_file(cif, vmid):
    """Recover the VM identified by vmid from its recovery file.

    :return: the recovery file path on success, None on any failure.
    """
    try:
        recovery_file = constants.P_VDSM_RUN + vmid + ".recovery"
        # Use a context manager and binary mode: the original used the
        # legacy file() builtin (removed in Python 3) and leaked the
        # file handle; pickle data must be read as bytes.
        with open(recovery_file, 'rb') as src:
            params = pickle.load(src)
        now = time.time()
        pt = float(params.pop('startTime', now))
        params['elapsedTimeOffset'] = now - pt
        cif.log.debug("recovery: trying with domain %s", vmid)
        if response.is_error(cif.createVm(params, vmRecover=True)):
            return None
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; a failed recovery is logged and skipped.
        cif.log.debug("Error recovering VM", exc_info=True)
        return None
    else:
        return recovery_file
def _recover(self, message):
    """Roll the VM back to a running state after a failed migration.

    Records an error status (unless one is already set), cleans up the
    half-created VM on the destination for live migrations, resumes the
    guest if it was paused for hibernation, and emits a status event.

    :param message: human-readable failure reason, logged as an error.
    """
    if not response.is_error(self.status):
        self.status = response.error('migrateErr')
    self.log.error(message)
    # Only a live migration has a remote side to clean up; guard against
    # a destination server that was never set up.
    if not self.hibernating and self._destServer is not None:
        try:
            self._destServer.destroy(self._vm.id)
        except Exception:
            self.log.exception("Failed to destroy remote VM")
    # if the guest was stopped before migration, we need to cont it
    if self.hibernating:
        self._vm.cont()
    # either way, migration has finished
    self._vm.lastStatus = vmstatus.UP
    self._vm.send_status_event()
def _recover(self, message):
    """Roll the VM back to a running state after a failed migration.

    Records an error status (unless one is already set), cleans up the
    half-created VM on the destination for live migrations, resumes the
    guest if it was paused for hibernation, and emits a status event.

    :param message: human-readable failure reason, logged as an error.
    """
    if not response.is_error(self.status):
        self.status = response.error('migrateErr')
    self.log.error(message)
    # Guard against a destination server that was never set up, instead
    # of triggering (and logging) an AttributeError on None; this also
    # matches the sibling _recover implementations.
    if not self.hibernating and self._destServer is not None:
        try:
            self._destServer.destroy(self._vm.id)
        except Exception:
            self.log.exception("Failed to destroy remote VM")
    # if the guest was stopped before migration, we need to cont it
    if self.hibernating:
        self._vm.cont()
    # either way, migration has finished
    self._vm.lastStatus = vmstatus.UP
    self._vm.send_status_event()
def _recover(self, message):
    """Roll the VM back to a running state after a failed migration.

    Records an error status (unless one is already set), cleans up the
    half-created VM on the destination for live migrations, resumes the
    guest if it was paused for hibernation, notifies the guest agent of
    the failure when guest events are enabled, and emits a status event.

    :param message: human-readable failure reason, logged as an error.
    """
    if not response.is_error(self.status):
        self.status = response.error('migrateErr')
    self.log.error(message)
    # Only a live migration has a remote side to clean up; guard against
    # a destination server that was never set up.
    if not self.hibernating and self._destServer is not None:
        try:
            self._destServer.destroy(self._vm.id)
        except Exception:
            self.log.exception("Failed to destroy remote VM")
    # if the guest was stopped before migration, we need to cont it
    if self.hibernating:
        self._vm.cont(ignoreStatus=True)
        if self._enableGuestEvents:
            self._vm.guestAgent.events.after_hibernation_failure()
    elif self._enableGuestEvents:
        self._vm.guestAgent.events.after_migration_failure()
    # either way, migration has finished
    self._vm.lastStatus = vmstatus.UP
    self._vm.send_status_event()
def test_is_error(self):
    """A canonical error response must be recognized as an error."""
    # 'noVM' has no special meaning here; any error code would do.
    err_res = response.error('noVM')
    self.assertTrue(response.is_error(err_res))
def testAcpiRebootConnected(self):
    """ACPI reboot on a connected domain must not report an error."""
    with fake.VM() as testvm:
        testvm._dom = fake.Domain(vmId='testvm')
        res = testvm.acpiReboot()
        self.assertFalse(response.is_error(res))
def testAcpiRebootDisconnected(self):
    """ACPI reboot on a disconnected domain must report an error."""
    with fake.VM() as testvm:
        testvm._dom = virdomain.Disconnected(vmid='testvm')
        res = testvm.acpiReboot()
        self.assertTrue(response.is_error(res))
def test_success_with_return_dict(self):
    """succeed_with_return must merge payload keys into a success response."""
    vmList = ['foobar']
    res = self.vm.succeed_with_return({'vmList': vmList})
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(response.is_error(res), False)
    self.assertEqual(res['vmList'], vmList)
def test_is_specific_error(self, actual_err, expected_err):
    """is_error(err=...) matches only when the error codes are the same."""
    match = actual_err == expected_err
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(
        match,
        response.is_error(response.error(actual_err), err=expected_err))
def test_legacy_error_code(self):
    """Every legacy errCode entry is an error and matches response.error()."""
    for code, legacy_res in errCode.items():
        self.assertTrue(response.is_error(legacy_res))
        self.assertEqual(legacy_res, response.error(code))
def test_success_with_kwargs(self):
    """succeed_with_kwargs must echo keyword args in a success response."""
    kwargs = {"foo": "bar"}
    res = self.vm.succeed_with_kwargs(**kwargs)
    self.assertEqual(res['kwargs'], kwargs)
    # Unified on assertEqual; the original mixed it with the deprecated
    # assertEquals alias.
    self.assertEqual(response.is_error(res), False)
def test_malformed_exception_contains_response(self):
    """The MalformedResponse exception must carry the offending response."""
    bad_res = {}
    # assertRaises makes the test fail if no exception is raised; the
    # original try/except pattern passed vacuously in that case.
    with self.assertRaises(response.MalformedResponse) as ctx:
        response.is_error(bad_res)
    self.assertEqual(ctx.exception.response, bad_res)
def acpiCallback(self):
    """Trigger ACPI shutdown; wait for the event unless the call failed."""
    shutdown_res = self.vm.acpiShutdown()
    if response.is_error(shutdown_res):
        return False
    return self.event.wait(self.timeout)
def test_success_with_return_dict_override_message(self):
    """A 'message' key in the payload must override the default message."""
    message = 'this message overrides the default'
    res = self.vm.succeed_with_return({'message': message})
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(response.is_error(res), False)
    self.assertEqual(res['status']['message'], message)
def test_success_with_args(self):
    """succeed_with_args must echo positional args in a success response."""
    args = ("foo", "bar")
    res = self.vm.succeed_with_args(*args)
    # Unified on assertEqual; the original mixed it with the deprecated
    # assertEquals alias.
    self.assertEqual(response.is_error(res), False)
    self.assertEqual(res['args'], args)
def test_is_specific_error(self, actual_err, expected_err):
    """is_error(err=...) matches only when the error codes are the same."""
    match = actual_err == expected_err
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(match,
                     response.is_error(response.error(actual_err),
                                       err=expected_err))