class VirtTest(TestCaseBase):
    """Functional tests that boot VMs through vdsm and verify their state.

    Each test drives a real vdsm instance via ``VdsProxy`` and polls VM
    status with ``retryAssert`` until the guest reaches the expected state.
    """

    # Statuses that count as "the VM is up" (guest agent may still be booting).
    UPSTATES = frozenset(('Up', 'Powering up'))

    def setUp(self):
        self.vdsm = VdsProxy()

    def _getVmStatus(self, vmid):
        """Return the stats dict for *vmid*, failing the test on API error."""
        status, msg, result = self.vdsm.getVmStats(vmid)
        self.assertEqual(status, SUCCESS, msg)
        return result

    def assertQemuSetupComplete(self, vmid):
        """Assert that qemu setup finished (VM left the WaitForLaunch state)."""
        result = self._getVmStatus(vmid)
        # BUGFIX: message used to be the copy-pasted 'VM is not booting!',
        # which describes assertVmBooting, not this check.
        self.assertTrue(result['status'] != 'WaitForLaunch',
                        'Qemu setup did not complete!')

    def assertVmBooting(self, vmid):
        """Assert that the VM has not transitioned to the Down state."""
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'Down',
                        'VM is not booting!')

    def assertVmUp(self, vmid):
        """Assert that the VM reports one of the up-ish statuses."""
        result = self._getVmStatus(vmid)
        self.assertIn(result['status'], self.UPSTATES)

    def assertGuestUp(self, vmid, targetUptime=0):
        """Assert the guest is Up; with *targetUptime* > 0, require that
        much elapsed time instead of checking the status string."""
        result = self._getVmStatus(vmid)
        if targetUptime > 0:
            self.assertTrue(int(result['elapsedTime']) >= targetUptime)
        else:
            # assertEqual, not the deprecated assertEquals alias.
            self.assertEqual(result['status'], 'Up')

    def _waitForStartup(self, vmid, targetUptime=0):
        """Poll the VM through its startup phases, failing on timeout.

        Phases: qemu setup -> booting -> up -> guest up (optionally with a
        minimum uptime of *targetUptime* seconds).
        """
        self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
                         timeout=10)
        self.retryAssert(partial(self.assertVmBooting, vmid),
                         timeout=3)
        self.retryAssert(partial(self.assertVmUp, vmid), timeout=10)
        # 20 % more time on timeout
        self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
                         timeout=math.ceil(targetUptime * 1.2))

    @requireKVM
    def testSimpleVm(self):
        """Boot a minimal VM and wait until the guest has been up a while."""
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testSimpleVm'}

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, 10)

    @requireKVM
    @permutations([['localfs'], ['iscsi'], ['nfs']])
    def testVmWithStorage(self, backendType):
        """Boot a VM with drives on the given storage backend."""
        disk = storage.StorageTest()
        disk.setUp()
        conf = storage.storageLayouts[backendType]
        drives = disk.generateDriveConf(conf)
        customization = {'vmId': '88888888-eeee-ffff-aaaa-111111111111',
                         'vmName': 'testVmWithStorage' + backendType,
                         'drives': drives}

        with RollbackContext() as rollback:
            disk.createVdsmStorageLayout(conf, 3, rollback)
            with RunningVm(self.vdsm, customization) as vm:
                self._waitForStartup(vm, 10)

    @requireKVM
    @permutations([['hotplugNic'], ['virtioNic'], ['smartcard'],
                   ['hotplugDisk'], ['virtioRng']])
    def testVmWithDevice(self, *devices):
        """Boot a VM with the requested device(s); hotplug devices are
        plugged and unplugged after the guest is up."""
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testVm', 'devices': []}
        storageLayout = storage.storageLayouts['localfs']
        diskSpecs = storage.StorageTest.generateDriveConf(storageLayout)
        pciSpecs = {'bus': '0x00', 'domain': '0x0000',
                    'function': '0x0', 'type': 'pci'}
        ccidSpecs = {'slot': '0', 'controller': '0', 'type': 'ccid'}
        pciSlots = [dict({'slot': '0x01'}, **pciSpecs),
                    dict({'slot': '0x02'}, **pciSpecs),
                    dict({'slot': '0x03'}, **pciSpecs)]
        deviceDef = {'virtioNic': {'nicModel': 'virtio',
                                   'macAddr': '52:54:00:59:F5:3F',
                                   'network': '', 'address': pciSlots[2],
                                   'device': 'bridge', 'type': 'interface',
                                   'linkActive': True,
                                   'filter': 'no-mac-spoofing'},
                     'hotplugNic': {'vmId': customization['vmId'],
                                    'nic': {'nicModel': 'virtio',
                                            'macAddr': '52:54:00:59:F5:2F',
                                            'network': '',
                                            'address': pciSlots[1],
                                            'device': 'bridge',
                                            'type': 'interface',
                                            'linkActive': True,
                                            'filter': 'no-mac-spoofing'}},
                     'smartcard': {'type': 'smartcard',
                                   'device': 'smartcard',
                                   'address': ccidSpecs,
                                   'alias': 'smartcard',
                                   'specParams': {'type': 'spicevmc',
                                                  'mode': 'passthrough'}},
                     'hotplugDisk': {'vmId': customization['vmId'],
                                     'drive': diskSpecs}}

        if 'virtioRng' in devices:
            status, msg, caps = self.vdsm.getVdsCapabilities()
            self.assertEqual(status, SUCCESS, msg)
            if not caps['rngSources']:
                raise SkipTest('No suitable rng source on host found')
            # we can safely pick any device as long as it exists
            deviceDef['virtioRng'] = {'type': 'rng', 'model': 'virtio',
                                      'specParams':
                                      {'bytes': '1234',
                                       'period': '20000',
                                       'source': caps['rngSources'][0]}}

        # Hotplug devices are attached only after the VM is running.
        for device in devices:
            if 'hotplug' not in device:
                customization['devices'].append(deviceDef[device])

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, 10)
            if 'hotplugNic' in devices:
                self.retryAssert(partial(self.vdsm.hotplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
            if 'hotplugDisk' in devices:
                self.retryAssert(partial(self.vdsm.hotplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
# NOTE(review): this file appears to contain two definitions of VirtTest;
# this later one shadows the earlier definition at import time — confirm
# whether the first copy should be removed.
class VirtTest(TestCaseBase):
    """Functional tests that boot VMs through vdsm and verify their state.

    Each test drives a real vdsm instance via ``VdsProxy`` and polls VM
    status with ``retryAssert`` until the guest reaches the expected state.
    """

    # Statuses that count as "the VM is up" (guest agent may still be booting).
    UPSTATES = frozenset(('Up', 'Powering up'))

    def setUp(self):
        self.vdsm = VdsProxy()

    def _getVmStatus(self, vmid):
        """Return the stats dict for *vmid*, failing the test on API error."""
        status, msg, result = self.vdsm.getVmStats(vmid)
        self.assertEqual(status, SUCCESS, msg)
        return result

    def assertQemuSetupComplete(self, vmid):
        """Assert that qemu setup finished (VM left the WaitForLaunch state)."""
        result = self._getVmStatus(vmid)
        # BUGFIX: message used to be the copy-pasted 'VM is not booting!',
        # which describes assertVmBooting, not this check.
        self.assertTrue(result['status'] != 'WaitForLaunch',
                        'Qemu setup did not complete!')

    def assertVmBooting(self, vmid):
        """Assert that the VM has not transitioned to the Down state."""
        result = self._getVmStatus(vmid)
        self.assertTrue(result['status'] != 'Down',
                        'VM is not booting!')

    def assertVmUp(self, vmid):
        """Assert that the VM reports one of the up-ish statuses."""
        result = self._getVmStatus(vmid)
        self.assertIn(result['status'], self.UPSTATES)

    def assertGuestUp(self, vmid, targetUptime=0):
        """Assert the guest is Up; with *targetUptime* > 0, require that
        much elapsed time instead of checking the status string."""
        result = self._getVmStatus(vmid)
        if targetUptime > 0:
            self.assertTrue(int(result['elapsedTime']) >= targetUptime)
        else:
            # assertEqual, not the deprecated assertEquals alias.
            self.assertEqual(result['status'], 'Up')

    def _waitForStartup(self, vmid, targetUptime=0):
        """Poll the VM through its startup phases, failing on timeout.

        Phases: qemu setup -> booting -> up -> guest up (optionally with a
        minimum uptime of *targetUptime* seconds).
        """
        self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
                         timeout=10)
        self.retryAssert(partial(self.assertVmBooting, vmid),
                         timeout=3)
        self.retryAssert(partial(self.assertVmUp, vmid), timeout=10)
        # 20 % more time on timeout
        self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
                         timeout=math.ceil(targetUptime * 1.2))

    @requireKVM
    def testSimpleVm(self):
        """Boot a minimal VM and wait until the guest has been up a while."""
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testSimpleVm'}

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, VM_MINIMAL_UPTIME)

    @requireKVM
    @permutations([['localfs'], ['iscsi'], ['nfs']])
    def testVmWithStorage(self, backendType):
        """Boot a VM with drives on the given storage backend."""
        disk = storage.StorageTest()
        disk.setUp()
        conf = storage.storageLayouts[backendType]
        drives = disk.generateDriveConf(conf)
        customization = {'vmId': '88888888-eeee-ffff-aaaa-111111111111',
                         'vmName': 'testVmWithStorage' + backendType,
                         'drives': drives}

        with RollbackContext() as rollback:
            disk.createVdsmStorageLayout(conf, 3, rollback)
            with RunningVm(self.vdsm, customization) as vm:
                self._waitForStartup(vm, VM_MINIMAL_UPTIME)

    @requireKVM
    @permutations([['hotplugNic'], ['virtioNic'], ['smartcard'],
                   ['hotplugDisk'], ['virtioRng']])
    def testVmWithDevice(self, *devices):
        """Boot a VM with the requested device(s); hotplug devices are
        plugged and unplugged after the guest is up."""
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'vmName': 'testVm', 'devices': []}
        storageLayout = storage.storageLayouts['localfs']
        diskSpecs = storage.StorageTest.generateDriveConf(storageLayout)
        pciSpecs = {'bus': '0x00', 'domain': '0x0000',
                    'function': '0x0', 'type': 'pci'}
        ccidSpecs = {'slot': '0', 'controller': '0', 'type': 'ccid'}
        pciSlots = [dict({'slot': '0x01'}, **pciSpecs),
                    dict({'slot': '0x02'}, **pciSpecs),
                    dict({'slot': '0x03'}, **pciSpecs)]
        deviceDef = {'virtioNic': {'nicModel': 'virtio',
                                   'macAddr': '52:54:00:59:F5:3F',
                                   'network': '', 'address': pciSlots[2],
                                   'device': 'bridge', 'type': 'interface',
                                   'linkActive': True,
                                   'filter': 'no-mac-spoofing'},
                     'hotplugNic': {'vmId': customization['vmId'],
                                    'nic': {'nicModel': 'virtio',
                                            'macAddr': '52:54:00:59:F5:2F',
                                            'network': '',
                                            'address': pciSlots[1],
                                            'device': 'bridge',
                                            'type': 'interface',
                                            'linkActive': True,
                                            'filter': 'no-mac-spoofing'}},
                     'smartcard': {'type': 'smartcard',
                                   'device': 'smartcard',
                                   'address': ccidSpecs,
                                   'alias': 'smartcard',
                                   'specParams': {'type': 'spicevmc',
                                                  'mode': 'passthrough'}},
                     'hotplugDisk': {'vmId': customization['vmId'],
                                     'drive': diskSpecs}}

        if 'virtioRng' in devices:
            status, msg, caps = self.vdsm.getVdsCapabilities()
            self.assertEqual(status, SUCCESS, msg)
            if not caps['rngSources']:
                raise SkipTest('No suitable rng source on host found')
            # we can safely pick any device as long as it exists
            deviceDef['virtioRng'] = {'type': 'rng', 'model': 'virtio',
                                      'specParams':
                                      {'bytes': '1234',
                                       'period': '20000',
                                       'source': caps['rngSources'][0]}}

        # Hotplug devices are attached only after the VM is running.
        for device in devices:
            if 'hotplug' not in device:
                customization['devices'].append(deviceDef[device])

        with RunningVm(self.vdsm, customization) as vm:
            self._waitForStartup(vm, VM_MINIMAL_UPTIME)
            if 'hotplugNic' in devices:
                self.retryAssert(partial(self.vdsm.hotplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugNic,
                                         deviceDef['hotplugNic']),
                                 timeout=10)
            if 'hotplugDisk' in devices:
                self.retryAssert(partial(self.vdsm.hotplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)
                self.retryAssert(partial(self.vdsm.hotunplugDisk,
                                         deviceDef['hotplugDisk']),
                                 timeout=10)

    @permutations([['self'], ['specParams'], ['vmPayload']])
    def testVmWithCdrom(self, pathLocation):
        """Boot a VM with a cdrom whose path is given directly, via
        specParams, or generated from a vmPayload, then verify the device
        reported by vdsm matches the requested path."""
        customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
                         'devices': [],
                         'vmName':
                         ('testVmWithCdrom_{}').format(pathLocation)}

        # echo -n testPayload | md5sum
        # d37e46c24c78b1aed33496107afdb44b
        vmPayloadName = ('/var/run/vdsm/payload/{}.'
                         'd37e46c24c78b1aed33496107afdb44b'
                         '.img').format(customization['vmId'])

        cdrom = {'index': '2', 'iface': 'ide', 'specParams': {},
                 'readonly': 'true', 'path': '', 'device': 'cdrom',
                 'shared': 'false', 'type': 'disk'}

        with temporaryPath(0o666) as path:
            cdromPaths = {'self': {'path': path,
                                   'specParams': {'path': '/dev/null'}},
                          'specParams': {'path': '',
                                         'specParams': {'path': path}},
                          'vmPayload': {'path': '',
                                        'specParams':
                                        {'path': '',
                                         'vmPayload':
                                         {'volId': 'testConfig',
                                          'file': {'testPayload': ''}}}}}
            cdrom.update(cdromPaths[pathLocation])
            customization['devices'].append(cdrom)

            with RunningVm(self.vdsm, customization) as vm:
                # CONSISTENCY: every other test in this class waits for
                # VM_MINIMAL_UPTIME; this one used the literal 10.
                self._waitForStartup(vm, VM_MINIMAL_UPTIME)
                status, msg, stats = self.vdsm.getVmList(vm)
                self.assertEqual(status, SUCCESS, msg)
                for device in stats['devices']:
                    if device['device'] == 'cdrom':
                        # The payload path is generated by vdsm, so fill it
                        # in before comparing.
                        if 'vmPayload' in cdrom['specParams']:
                            cdrom['path'] = vmPayloadName
                        self.assertEqual(device['path'], cdrom['path'])
                        self.assertEqual(device['specParams']['path'],
                                         cdrom['specParams']['path'])