class VirtTestBase(TestCaseBase, verify.DeviceMixin):
    """Shared helpers for virt functional tests: polling VM lifecycle
    states through a live vdsm instance and verifying device configs."""

    # States in which a VM counts as running (fully up or powering up).
    UPSTATES = frozenset((vmstatus.UP, vmstatus.POWERING_UP))

    def setUp(self):
        self.vdsm = VdsProxy()

    def _getVmStatus(self, vmid):
        """Return the stats dict for *vmid*, failing the test on RPC error."""
        status, msg, result = self.vdsm.getVmStats(vmid)
        self.assertEqual(status, SUCCESS, msg)
        return result

    def assertQemuSetupComplete(self, vmid):
        result = self._getVmStatus(vmid)
        # Message fixed: this asserts qemu setup finished (left
        # WaitForLaunch), not that the guest is booting.
        self.assertTrue(result['status'] != vmstatus.WAIT_FOR_LAUNCH,
                        'VM has not completed qemu setup!')

    def assertVmBooting(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != vmstatus.DOWN,
                        'VM is not booting!')

    def assertVmUp(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertIn(result['status'], self.UPSTATES)

    def assertVmDown(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertEqual(result['status'], vmstatus.DOWN)

    def assertGuestUp(self, vmid, targetUptime=0):
        """Assert the guest is up; with targetUptime > 0, assert it has
        been up for at least that many seconds."""
        result = self._getVmStatus(vmid)
        if targetUptime > 0:
            self.assertTrue(int(result['elapsedTime']) >= targetUptime)
        else:
            # assertEqual, not the deprecated assertEquals alias
            # (removed in Python 3.12).
            self.assertEqual(result['status'], vmstatus.UP)

    def _waitForBoot(self, vmid):
        self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
                         timeout=10)
        self.retryAssert(partial(self.assertVmBooting, vmid),
                         timeout=3)
        self.retryAssert(partial(self.assertVmUp, vmid),
                         timeout=10)

    def _waitForStartup(self, vmid, targetUptime=0):
        self._waitForBoot(vmid)
        # 20 % more time on timeout
        self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
                         timeout=math.ceil(targetUptime * 1.2))

    def _waitForShutdown(self, vmid):
        self.retryAssert(partial(self.assertVmDown, vmid),
                         timeout=10)

    def _verifyDevices(self, vmId):
        status, msg, stats = self.vdsm.getVmList(vmId)
        self.assertEqual(status, SUCCESS, msg)
        self.verifyDevicesConf(conf=stats['devices'])
class MOMTest(TestCaseBase):
    """Functional tests for MOM policies: KSM tuning, balloon
    shrink/grow and CPU tune, exercised against a live vdsm."""

    # Define the initial, low and high value of shrink and grow operation.
    # Initial is the 'balloon_cur' value before the operation performed.
    # (low, high) is the proper range for 'balloon_cur' after the
    # operation. This range is calculated according to initial value,
    # expected value and adjustment step in policy.
    # This range also takes accuracy impact into account (the number is
    # rounded to integer).
    BalloonRatio = namedtuple('BalloonRatio', 'initial, low, high')

    def setUp(self):
        self.s = VdsProxy()

    @testValidation.ValidateRunningAsRoot
    @skipNoMOM
    def testKSM(self):
        run = 1
        pages_to_scan = random.randint(100, 200)

        # Set a simple MOM policy to change KSM parameters unconditionally.
        testPolicyStr = """
            (Host.Control "ksm_run" %d)
            (Host.Control "ksm_pages_to_scan" %d)""" % \
            (run, pages_to_scan)

        status, msg = self.s.setMOMPolicy(testPolicyStr)
        self.assertEqual(status, SUCCESS, msg)

        # Wait for the policy taking effect
        time.sleep(10)
        status, msg, hostStats = self.s.getVdsStats()
        self.assertEqual(bool(run), hostStats['ksmState'])
        self.assertEqual(pages_to_scan, hostStats['ksmPages'])

    def _statsOK(self, stats):
        """A VM is an operation candidate only when it is up and reports
        both balloon and memory statistics."""
        try:
            return stats['status'] == 'Up' and stats['balloonInfo'] \
                and stats['memoryStats']
        except KeyError:
            return False

    def _prepare(self, balloonRatio):
        """Reset each candidate VM's balloon target to its initial value
        and return the list of candidate vmIds."""
        # Get vms' statistics before the operation.
        status, msg, statsList = self.s.getAllVmStats()
        self.assertEqual(status, SUCCESS, msg)

        # Filter all vms' statistics to get balloon operation candidates.
        # Materialized with list(): filter() returns a one-shot iterator
        # on Python 3 and we iterate the candidates twice below.
        candidateStats = list(filter(self._statsOK, statsList))

        # Set the balloon target to initial value before shrink
        # or grow operation.
        # The initial value is max for shrink operation and
        # 0.95*max for grow operation.
        for stats in candidateStats:
            initial = int(stats['balloonInfo']['balloon_max']) * \
                balloonRatio.initial
            if int(stats['balloonInfo']['balloon_cur']) != initial:
                status, msg = self.s.setBalloonTarget(
                    stats['vmId'], initial)
                self.assertEqual(status, SUCCESS, msg)

        return [stats['vmId'] for stats in candidateStats]

    def _setCpuTune(self, vcpuQuota, vcpuPeriod):
        """Apply the given vcpu quota and period to every candidate VM."""
        # Get vms' statistics before the operation.
        status, msg, statsList = self.s.getAllVmStats()
        self.assertEqual(status, SUCCESS, msg)

        # Filter all vms' statistics to get cpu tune candidates.
        candidateStats = filter(self._statsOK, statsList)

        for stats in candidateStats:
            status, msg = self.s.setCpuTuneQuota(
                stats['vmId'], vcpuQuota)
            self.assertEqual(status, SUCCESS, msg)
            status, msg = self.s.setCpuTunePeriod(
                stats['vmId'], vcpuPeriod)
            self.assertEqual(status, SUCCESS, msg)

    def _setPolicy(self, policy):
        """Read the named policy file shipped next to this test module
        and push it to MOM; skip the test if the file is unavailable."""
        curpath = os.path.dirname(__file__)
        file_name = os.path.join(curpath, policy)
        try:
            with open(file_name, 'r') as f:
                testPolicyStr = f.read()
        except IOError as e:
            if e.errno == errno.ENOENT:
                raise SkipTest('The policy file %s is missing.' % file_name)
            else:
                # str(e), not e.message: the .message attribute was
                # removed from exceptions in Python 3.
                raise SkipTest(str(e))
        status, msg = self.s.setMOMPolicy(testPolicyStr)
        self.assertEqual(status, SUCCESS, msg)

    def _checkResult(self, vmCandidates, balloonRatio):
        # Check the new balloon_cur in the proper range.
        for vmId in vmCandidates:
            r = self.s.getVmStats(vmId)
            if len(r) == 2:
                status, msg = r
                # No stats payload in the error-shaped reply; guard the
                # check below instead of risking a NameError.
                vmNewStats = None
            else:
                status, msg, vmNewStats = r
            # Vm doesn't exist.
            if status == errCode['noVM']['status']['code']:
                continue
            else:
                self.assertEqual(status, SUCCESS, msg)
            if vmNewStats is not None and self._statsOK(vmNewStats):
                balloonMax = int(vmNewStats['balloonInfo']['balloon_max'])
                balloonCur = int(vmNewStats['balloonInfo']['balloon_cur'])
                self.assertTrue(
                    balloonCur >= floor(balloonRatio.low * balloonMax))
                self.assertTrue(
                    balloonCur <= ceil(balloonRatio.high * balloonMax))

    def _basicBalloon(self, balloonRatio, policy):
        """Run one balloon operation end to end and verify the result."""
        vmCandidates = self._prepare(balloonRatio)
        if not vmCandidates:
            raise SkipTest('No VM can be candidate of ballooning operation.')

        # Set policy to trigger the balloon operation.
        self._setPolicy(policy)

        # Wait for the policy taking effect.
        time.sleep(22)

        self._checkResult(vmCandidates, balloonRatio)

    @testValidation.ValidateRunningAsRoot
    @skipNoMOM
    @testValidation.slowtest
    def testBalloonShrink(self):
        self._basicBalloon(self.BalloonRatio(1, 0.9475, 0.95),
                           '60_test_balloon_shrink.policy')

    @testValidation.ValidateRunningAsRoot
    @skipNoMOM
    @testValidation.slowtest
    def testBalloonGrow(self):
        self._basicBalloon(self.BalloonRatio(0.95, 0.9975, 1),
                           '70_test_balloon_grow.policy')

    @testValidation.ValidateRunningAsRoot
    @skipNoMOM
    @testValidation.slowtest
    def testCpuTune(self):
        self._setCpuTune(2000, 10000)
class VirtTest(TestCaseBase):
    """Functional tests that start real VMs through vdsm and verify
    startup, storage backends and device configuration."""

    # States in which a VM counts as running (fully up or powering up).
    UPSTATES = frozenset(('Up', 'Powering up'))

    def setUp(self):
        self.vdsm = VdsProxy()

    def _getVmStatus(self, vmid):
        """Return the stats dict for *vmid*, failing the test on RPC error."""
        status, msg, result = self.vdsm.getVmStats(vmid)
        self.assertEqual(status, SUCCESS, msg)
        return result

    def assertQemuSetupComplete(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'WaitForLaunch',
                        'VM is not booting!')

    def assertVmBooting(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'Down',
                        'VM is not booting!')

    def assertVmUp(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertIn(result['status'], self.UPSTATES)

    def assertGuestUp(self, vmid, targetUptime=0):
        """Assert the guest is up; with targetUptime > 0, assert it has
        been up for at least that many seconds."""
        result = self._getVmStatus(vmid)
        if targetUptime > 0:
            self.assertTrue(int(result['elapsedTime']) >= targetUptime)
        else:
            # assertEqual, not the deprecated assertEquals alias
            # (removed in Python 3.12).
            self.assertEqual(result['status'], 'Up')

    def _waitForStartup(self, vmid, targetUptime=0):
        self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
                         timeout=10)
        self.retryAssert(partial(self.assertVmBooting, vmid),
                         timeout=3)
        self.retryAssert(partial(self.assertVmUp, vmid),
                         timeout=10)
        # 20 % more time on timeout
        self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
                         timeout=math.ceil(targetUptime * 1.2))

    @requireKVM
    def testSimpleVm(self):
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testSimpleVm'}

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, 10)

    @requireKVM
    @permutations([['localfs'], ['iscsi'], ['nfs']])
    def testVmWithStorage(self, backendType):
        disk = storage.StorageTest()
        disk.setUp()
        conf = storage.storageLayouts[backendType]
        drives = disk.generateDriveConf(conf)
        customization = {'vmId': '88888888-eeee-ffff-aaaa-111111111111',
                         'vmName': 'testVmWithStorage' + backendType,
                         'drives': drives}

        with RollbackContext() as rollback:
            disk.createVdsmStorageLayout(conf, 3, rollback)
            with RunningVm(self.vdsm, customization) as vm:
                self._waitForStartup(vm, 10)

    @requireKVM
    @permutations([['hotplugNic'], ['virtioNic'], ['smartcard'],
                   ['hotplugDisk'], ['virtioRng']])
    def testVmWithDevice(self, *devices):
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testVm', 'devices': []}
        storageLayout = storage.storageLayouts['localfs']
        diskSpecs = storage.StorageTest.generateDriveConf(storageLayout)
        pciSpecs = {'bus': '0x00', 'domain': '0x0000',
                    'function': '0x0', 'type': 'pci'}
        ccidSpecs = {'slot': '0', 'controller': '0', 'type': 'ccid'}
        pciSlots = [dict({'slot': '0x01'}, **pciSpecs),
                    dict({'slot': '0x02'}, **pciSpecs),
                    dict({'slot': '0x03'}, **pciSpecs)]
        deviceDef = {'virtioNic': {'nicModel': 'virtio',
                                   'macAddr': '52:54:00:59:F5:3F',
                                   'network': '', 'address': pciSlots[2],
                                   'device': 'bridge', 'type': 'interface',
                                   'linkActive': True,
                                   'filter': 'no-mac-spoofing'},
                     'hotplugNic': {'vmId': customization['vmId'],
                                    'nic': {'nicModel': 'virtio',
                                            'macAddr': '52:54:00:59:F5:2F',
                                            'network': '',
                                            'address': pciSlots[1],
                                            'device': 'bridge',
                                            'type': 'interface',
                                            'linkActive': True,
                                            'filter': 'no-mac-spoofing'}},
                     'smartcard': {'type': 'smartcard',
                                   'device': 'smartcard',
                                   'address': ccidSpecs,
                                   'alias': 'smartcard',
                                   'specParams': {'type': 'spicevmc',
                                                  'mode': 'passthrough'}},
                     'hotplugDisk': {'vmId': customization['vmId'],
                                     'drive': diskSpecs}}

        if 'virtioRng' in devices:
            status, msg, caps = self.vdsm.getVdsCapabilities()
            self.assertEqual(status, SUCCESS, msg)
            if not caps['rngSources']:
                raise SkipTest('No suitable rng source on host found')
            # we can safely pick any device as long as it exists
            deviceDef['virtioRng'] = {'type': 'rng', 'model': 'virtio',
                                      'specParams': {
                                          'bytes': '1234',
                                          'period': '20000',
                                          'source': caps['rngSources'][0]}}

        # Hotplug devices are plugged after startup, not at VM creation.
        for device in devices:
            if 'hotplug' not in device:
                customization['devices'].append(deviceDef[device])

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, 10)

            if 'hotplugNic' in devices:
                self.retryAssert(partial(self.vdsm.hotplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
            if 'hotplugDisk' in devices:
                self.retryAssert(partial(self.vdsm.hotplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
class VirtTest(TestCaseBase):
    """Functional tests that start real VMs through vdsm and verify
    startup, storage backends, device configuration and cdrom paths."""

    # States in which a VM counts as running (fully up or powering up).
    UPSTATES = frozenset(('Up', 'Powering up'))

    def setUp(self):
        self.vdsm = VdsProxy()

    def _getVmStatus(self, vmid):
        """Return the stats dict for *vmid*, failing the test on RPC error."""
        status, msg, result = self.vdsm.getVmStats(vmid)
        self.assertEqual(status, SUCCESS, msg)
        return result

    def assertQemuSetupComplete(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'WaitForLaunch',
                        'VM is not booting!')

    def assertVmBooting(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'Down',
                        'VM is not booting!')

    def assertVmUp(self, vmid):
        result = self._getVmStatus(vmid)
        self.assertIn(result['status'], self.UPSTATES)

    def assertGuestUp(self, vmid, targetUptime=0):
        """Assert the guest is up; with targetUptime > 0, assert it has
        been up for at least that many seconds."""
        result = self._getVmStatus(vmid)
        if targetUptime > 0:
            self.assertTrue(int(result['elapsedTime']) >= targetUptime)
        else:
            # assertEqual, not the deprecated assertEquals alias
            # (removed in Python 3.12).
            self.assertEqual(result['status'], 'Up')

    def _waitForStartup(self, vmid, targetUptime=0):
        self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
                         timeout=10)
        self.retryAssert(partial(self.assertVmBooting, vmid),
                         timeout=3)
        self.retryAssert(partial(self.assertVmUp, vmid),
                         timeout=10)
        # 20 % more time on timeout
        self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
                         timeout=math.ceil(targetUptime * 1.2))

    @requireKVM
    def testSimpleVm(self):
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testSimpleVm'}

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, VM_MINIMAL_UPTIME)

    @requireKVM
    @permutations([['localfs'], ['iscsi'], ['nfs']])
    def testVmWithStorage(self, backendType):
        disk = storage.StorageTest()
        disk.setUp()
        conf = storage.storageLayouts[backendType]
        drives = disk.generateDriveConf(conf)
        customization = {'vmId': '88888888-eeee-ffff-aaaa-111111111111',
                         'vmName': 'testVmWithStorage' + backendType,
                         'drives': drives}

        with RollbackContext() as rollback:
            disk.createVdsmStorageLayout(conf, 3, rollback)
            with RunningVm(self.vdsm, customization) as vm:
                self._waitForStartup(vm, VM_MINIMAL_UPTIME)

    @requireKVM
    @permutations([['hotplugNic'], ['virtioNic'], ['smartcard'],
                   ['hotplugDisk'], ['virtioRng']])
    def testVmWithDevice(self, *devices):
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testVm', 'devices': []}
        storageLayout = storage.storageLayouts['localfs']
        diskSpecs = storage.StorageTest.generateDriveConf(storageLayout)
        pciSpecs = {'bus': '0x00', 'domain': '0x0000',
                    'function': '0x0', 'type': 'pci'}
        ccidSpecs = {'slot': '0', 'controller': '0', 'type': 'ccid'}
        pciSlots = [dict({'slot': '0x01'}, **pciSpecs),
                    dict({'slot': '0x02'}, **pciSpecs),
                    dict({'slot': '0x03'}, **pciSpecs)]
        deviceDef = {'virtioNic': {'nicModel': 'virtio',
                                   'macAddr': '52:54:00:59:F5:3F',
                                   'network': '', 'address': pciSlots[2],
                                   'device': 'bridge', 'type': 'interface',
                                   'linkActive': True,
                                   'filter': 'no-mac-spoofing'},
                     'hotplugNic': {'vmId': customization['vmId'],
                                    'nic': {'nicModel': 'virtio',
                                            'macAddr': '52:54:00:59:F5:2F',
                                            'network': '',
                                            'address': pciSlots[1],
                                            'device': 'bridge',
                                            'type': 'interface',
                                            'linkActive': True,
                                            'filter': 'no-mac-spoofing'}},
                     'smartcard': {'type': 'smartcard',
                                   'device': 'smartcard',
                                   'address': ccidSpecs,
                                   'alias': 'smartcard',
                                   'specParams': {'type': 'spicevmc',
                                                  'mode': 'passthrough'}},
                     'hotplugDisk': {'vmId': customization['vmId'],
                                     'drive': diskSpecs}}

        if 'virtioRng' in devices:
            status, msg, caps = self.vdsm.getVdsCapabilities()
            self.assertEqual(status, SUCCESS, msg)
            if not caps['rngSources']:
                raise SkipTest('No suitable rng source on host found')
            # we can safely pick any device as long as it exists
            deviceDef['virtioRng'] = {'type': 'rng', 'model': 'virtio',
                                      'specParams': {
                                          'bytes': '1234',
                                          'period': '20000',
                                          'source': caps['rngSources'][0]}}

        # Hotplug devices are plugged after startup, not at VM creation.
        for device in devices:
            if 'hotplug' not in device:
                customization['devices'].append(deviceDef[device])

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, VM_MINIMAL_UPTIME)

            if 'hotplugNic' in devices:
                self.retryAssert(partial(self.vdsm.hotplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
            if 'hotplugDisk' in devices:
                self.retryAssert(partial(self.vdsm.hotplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)

    @permutations([['self'], ['specParams'], ['vmPayload']])
    def testVmWithCdrom(self, pathLocation):
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'devices': [],
                         'vmName':
                             'testVmWithCdrom_{}'.format(pathLocation)}

        # echo -n testPayload | md5sum
        # d37e46c24c78b1aed33496107afdb44b
        vmPayloadName = ('/var/run/vdsm/payload/{}.'
                         'd37e46c24c78b1aed33496107afdb44b'
                         '.img').format(customization['vmId'])

        cdrom = {'index': '2', 'iface': 'ide', 'specParams': {},
                 'readonly': 'true', 'path': '', 'device': 'cdrom',
                 'shared': 'false', 'type': 'disk'}

        with temporaryPath(0o666) as path:
            # The cdrom path may come from the device itself, its
            # specParams, or a vmPayload vdsm generates on the fly.
            cdromPaths = {'self': {'path': path,
                                   'specParams': {'path': '/dev/null'}},
                          'specParams': {'path': '',
                                         'specParams': {'path': path}},
                          'vmPayload': {'path': '',
                                        'specParams': {
                                            'path': '',
                                            'vmPayload': {
                                                'volId': 'testConfig',
                                                'file': {
                                                    'testPayload': ''}}}}}
            cdrom.update(cdromPaths[pathLocation])
            customization['devices'].append(cdrom)

            with RunningVm(self.vdsm, customization) as vm:
                self._waitForStartup(vm, 10)
                status, msg, stats = self.vdsm.getVmList(vm)
                self.assertEqual(status, SUCCESS, msg)
                for device in stats['devices']:
                    if device['device'] == 'cdrom':
                        # For vmPayload, vdsm substitutes the generated
                        # payload image path.
                        if 'vmPayload' in cdrom['specParams']:
                            cdrom['path'] = vmPayloadName
                        self.assertEqual(device['path'], cdrom['path'])
                        self.assertEqual(device['specParams']['path'],
                                         cdrom['specParams']['path'])