async def test_create_machine_and_restore_machine(event_loop, mock_libvirt,
                                                  mock_image_store,
                                                  mock_nbd_provisioner,
                                                  mock_cloud_init,
                                                  mock_dhcp_handler):
    gpu1 = _generate_device(2)
    macs = _generate_macs(2)
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_image_store.clone_qcow.return_value = "/tmp/image.qcow"
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    old_allocator = allocator.Allocator(copy.copy(macs), copy.copy(gpu1),
                                        manager, "sasha", max_vms=1,
                                        paravirt_device="eth0",
                                        sol_base_port=5000)
    await old_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                    base_image_size=None, networks=["bridge"],
                                    num_cpus=2, num_gpus=1)
    assert len(old_allocator.vms) == 1
    assert 'sasha-vm-0' in old_allocator.vms
    old_vm_info = await manager.info(old_allocator.vms['sasha-vm-0'])

    # Get the definition the machine was created with
    vm_def = mock_libvirt.define_vm.call_args.args[0]
    vm_metadata_to_restore = _emulate_libvirt_xml_dump_and_load(vm_def)
    mock_libvirt.load_lab_vms.return_value = [vm_metadata_to_restore]

    # Now recreate the allocator
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.restore_vms()
    sasha_vm = tested.vms['sasha-vm-0']
    assert len(tested.vms) == 1
    assert 'sasha-vm-0' in tested.vms
    assert str(sasha_vm.sol_port) in tested.sol_used_ports

    # Resources held by the restored VM must not remain in the free pools
    for pci in sasha_vm.pcis:
        assert pci.full_address not in [
            gpu.full_address for gpu in tested.gpus_list
        ]
    vm_macs = [net['macaddress'] for net in sasha_vm.net_ifaces]
    for mac in vm_macs:
        assert mac not in tested.mac_addresses

    restored_vm_info = await manager.info(tested.vms['sasha-vm-0'])
    assert restored_vm_info == old_vm_info
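# The helpers below are referenced throughout these tests but defined
# elsewhere in the module. A minimal sketch of what they might look like,
# inferred from the call sites and from the expected values in test_vm_list
# (the bodies are reconstructions, not the canonical helpers):


def _generate_macs(count):
    # Fake, unique MAC addresses for the allocator's free pool; index 0
    # yields the "00:00:00:00:00:00" that test_vm_list expects.
    return ["00:00:00:00:00:%02x" % i for i in range(count)]


def _generate_device(count):
    # Fake PCI GPU descriptors; full_address mimics the "0:0:0.0" form
    # that test_vm_list expects in the serialized VM.
    return [munch.Munch(full_address="0:0:0.%d" % i) for i in range(count)]


def _emulate_libvirt_xml_dump_and_load(vm_def):
    # Emulate the define -> XML dump -> load round trip: serialize the VM
    # definition to its .json form and hand it back as the Munch that
    # load_lab_vms returns (mirrors the inline version used in
    # test_restore_machine_fail_to_restore_network_timeout_success).
    return munch.Munch(vm_def.json)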
async def test_restore_machine_fail_to_restore_network_timeout_success(
        event_loop, mock_libvirt, mock_image_store, mock_nbd_provisioner,
        mock_cloud_init, mock_dhcp_handler):
    gpu1 = _generate_device(2)
    macs = _generate_macs(2)
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_image_store.clone_qcow.return_value = "/tmp/image.qcow"
    old_allocator = allocator.Allocator(copy.copy(macs), copy.copy(gpu1),
                                        manager, "sasha", max_vms=1,
                                        paravirt_device="eth0",
                                        sol_base_port=5000)
    await old_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                    base_image_size=None, networks=["bridge"],
                                    num_cpus=2, num_gpus=1)
    assert len(old_allocator.vms) == 1
    assert 'sasha-vm-0' in old_allocator.vms
    old_vm_info = await manager.info(old_allocator.vms['sasha-vm-0'])

    # Get the json the machine was created with
    vm_def = mock_libvirt.define_vm.call_args.args[0].json
    # Now set load to return the same json
    mock_libvirt.load_lab_vms.return_value = [munch.Munch(vm_def)]

    # Now recreate the allocator; the DHCP timeout must not abort the restore
    mock_dhcp_handler.reallocate_ip = mock.AsyncMock(
        side_effect=TimeoutError("Failed to allocate ip"))
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.restore_vms()
    assert len(tested.vms) == 1
    assert 'sasha-vm-0' in tested.vms
    restored_vm_info = await manager.info(tested.vms['sasha-vm-0'])
    assert restored_vm_info == old_vm_info
async def test_kill_non_existing_vm(event_loop, mock_libvirt,
                                    mock_image_store, mock_nbd_provisioner,
                                    mock_cloud_init, mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=10,
                             networks=["bridge"], num_cpus=2, num_gpus=1)
    assert len(tested.vms) == 1
    with pytest.raises(Exception):
        await tested.destroy_vm("nonexisting")
    assert len(tested.vms) == 1
async def test_start_stop_machine(event_loop, mock_libvirt, mock_image_store,
                                  mock_nbd_provisioner, mock_cloud_init,
                                  mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=5000)
    await alloc.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                            networks=["bridge"], num_cpus=2, num_gpus=1)
    assert len(alloc.vms) == 1
    assert 'sasha-vm-0' in alloc.vms

    await manager.stop_vm(alloc.vms["sasha-vm-0"])
    mock_libvirt.poweroff_vm.assert_called_once()

    start_count = mock_libvirt.start_vm.call_count
    await manager.start_vm(alloc.vms["sasha-vm-0"])
    assert mock_libvirt.start_vm.call_count == start_count + 1
async def test_restore_machine_fail_to_restore_network(
        event_loop, mock_libvirt, mock_image_store, mock_nbd_provisioner,
        mock_cloud_init, mock_dhcp_handler):
    gpu1 = _generate_device(2)
    macs = _generate_macs(2)
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_image_store.clone_qcow.return_value = "/tmp/image.qcow"
    old_allocator = allocator.Allocator(copy.copy(macs), copy.copy(gpu1),
                                        manager, "sasha", max_vms=1,
                                        paravirt_device="eth0",
                                        sol_base_port=5000)
    await old_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                    base_image_size=None, networks=["bridge"],
                                    num_cpus=2, num_gpus=1)
    assert len(old_allocator.vms) == 1
    assert 'sasha-vm-0' in old_allocator.vms

    # Get the definition the machine was created with
    vm_def = mock_libvirt.define_vm.call_args.args[0]
    vm_metadata_to_restore = _emulate_libvirt_xml_dump_and_load(vm_def)
    mock_libvirt.load_lab_vms.return_value = [vm_metadata_to_restore]

    # Now recreate the allocator; a hard DHCP failure must drop the VM
    mock_dhcp_handler.reallocate_ip = mock.AsyncMock(
        side_effect=Exception("Failed to allocate ip"))
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.restore_vms()
    assert len(tested.vms) == 0
    # We must still have all the resources the allocator was initialized with
    assert tested.mac_addresses == macs
    assert tested.gpus_list == gpu1
async def test_allocate_machine_no_gpus(event_loop, mock_libvirt,
                                        mock_image_store,
                                        mock_nbd_provisioner, mock_cloud_init,
                                        mock_dhcp_handler):
    gpus = []
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    tested = allocator.Allocator(macs, gpus, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", base_image_size=None, memory_gb=1,
                             networks=["bridge"], num_cpus=2, num_gpus=0)
    assert len(tested.vms) == 1
    vm = tested.vms['sasha-vm-0']
    _verify_vm_valid(tested, vm,
                     expected_vm_name="sasha-vm-0",
                     expected_base_image="/home/sasha_king.qcow",
                     expected_gpus=[],
                     expected_mem=1,
                     expected_networks=[{
                         "mac": macs[0],
                         "type": "bridge",
                         "source": "eth0"
                     }],
                     num_cpus=2)
    mock_image_store.clone_qcow.assert_called_with("sasha_image1",
                                                   "sasha-vm-0", None)
    # FIXME: add assertion on call
    mock_libvirt.define_vm.assert_called()

    # Now destroy the VM
    await tested.destroy_vm("sasha-vm-0")
    assert len(tested.vms) == 0
    assert tested.gpus_list == []
    assert tested.mac_addresses == macs
    mock_image_store.delete_qcow.assert_called_with("/home/sasha_king.qcow")
    mock_libvirt.kill_by_name.assert_called_with("sasha-vm-0")
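# _verify_vm_valid is shared by the allocation tests. A rough sketch of the
# checks it plausibly performs, with keyword names taken from the call sites
# and field names from the serialized VM in test_vm_list (the body is an
# assumption, not the real helper):


def _verify_vm_valid(tested, vm, expected_vm_name, expected_base_image,
                     expected_gpus, expected_mem, expected_networks,
                     num_cpus, base_image_size=None, disks=None):
    assert vm.name == expected_vm_name
    assert vm.image == expected_base_image
    assert vm.memsize == expected_mem
    assert vm.num_cpus == num_cpus
    assert vm.base_image_size == base_image_size
    assert len(vm.pcis) == len(expected_gpus)
    assert len(vm.net_ifaces) == len(expected_networks)
    for iface, expected in zip(vm.net_ifaces, expected_networks):
        assert iface['macaddress'] == expected['mac']
        assert iface['mode'] == expected['type']
        assert iface['source'] == expected['source']
        if 'ip' in expected:
            assert iface['ip'] == expected['ip']
    if disks is not None:
        assert vm.disks == disks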
async def test_allocate_more_vms_than_we_can(event_loop, mock_libvirt,
                                             mock_image_store,
                                             mock_nbd_provisioner,
                                             mock_cloud_init,
                                             mock_dhcp_handler):
    gpus = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    tested = allocator.Allocator(macs, gpus, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                             networks=["bridge"], num_cpus=2, num_gpus=0)
    assert len(tested.vms) == 1
    with pytest.raises(NotEnoughResourceException):
        await tested.allocate_vm("sasha_image2", memory_gb=1,
                                 base_image_size=None, networks=["bridge"],
                                 num_cpus=2, num_gpus=0)
    assert len(tested.vms) == 1

    # Now destroy the VM
    await tested.destroy_vm("sasha-vm-0")
    assert len(tested.vms) == 0
    assert tested.gpus_list == gpus
    assert tested.mac_addresses == macs

    # Now we can allocate more
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                             networks=["bridge"], num_cpus=2, num_gpus=0)
    assert len(tested.vms) == 1
async def test_vm_info(mock_libvirt, mock_image_store, aiohttp_client, loop,
                       mock_nbd_provisioner, mock_cloud_init,
                       mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    mock_libvirt.dhcp_lease_info.return_value = {
        '52:54:00:8d:c0:07': ['192.168.122.186', '192.168.122.187'],
        '52:54:00:8d:c0:08': ['192.168.122.188']
    }
    mock_cloud_init.generate_iso.return_value = "/tmp/iso_path"
    mock_libvirt.status.return_value = 'on'
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    manager = vm_manager.VMManager(loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=1000)
    with mock.patch("socket.socket"):
        await alloc.allocate_vm("sasha_image1", memory_gb=1,
                                base_image_size=20, networks=["bridge"],
                                num_cpus=2, num_gpus=1)
    assert len(alloc.vms) == 1
    assert 'sasha-vm-0' in alloc.vms

    app = web.Application()
    rest.HyperVisor(alloc, image_store, app)
    client = await aiohttp_client(app)
    resp = await client.get("/vms/sasha-vm-0")
    vm = await resp.json()
    assert vm == {
        'info': {
            'name': 'sasha-vm-0',
            'disks': [],
            'status': 'on',
            'dhcp': {
                '52:54:00:8d:c0:07': ['192.168.122.186', '192.168.122.187'],
                '52:54:00:8d:c0:08': ['192.168.122.188']
            }
        }
    }
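# Outside the test, the same endpoint could be exercised against a running
# hypervisor (illustrative only; the host and port depend on how the service
# is started, see the entry point further down):
#
#   curl http://<hypervisor-host>:<port>/vms/sasha-vm-0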
async def test_machine_info(event_loop, mock_libvirt, mock_image_store,
                            mock_nbd_provisioner, mock_cloud_init,
                            mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(2)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    mock_libvirt.dhcp_lease_info.return_value = {
        '52:54:00:8d:c0:07': ['192.168.122.186'],
        '52:54:00:8d:c0:08': ['192.168.122.187']
    }
    mock_libvirt.status.return_value = "on"
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=5000)
    await alloc.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                            networks=["isolated", "isolated"], num_cpus=2,
                            num_gpus=1,
                            disks=[{
                                "type": "ssd",
                                "size": 10,
                                "fs": "ext4"
                            }, {
                                "type": "hdd",
                                "size": 5,
                                "fs": "ext4"
                            }])
    assert len(alloc.vms) == 1
    assert 'sasha-vm-0' in alloc.vms
    vm_info = await manager.info(alloc.vms['sasha-vm-0'])
    mock_libvirt.dhcp_lease_info.assert_called_once_with("sasha-vm-0")
    mock_libvirt.status.assert_called_once_with("sasha-vm-0")
    assert len(vm_info['disks']) == 2
    assert vm_info['status'] == 'on'
    assert vm_info['dhcp'] == {
        '52:54:00:8d:c0:07': ['192.168.122.186'],
        '52:54:00:8d:c0:08': ['192.168.122.187']
    }
async def test_free_resources_on_destroy_or_exception(event_loop,
                                                      mock_libvirt,
                                                      mock_image_store,
                                                      mock_nbd_provisioner,
                                                      mock_cloud_init,
                                                      mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(2)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    mock_cloud_init.generate_iso.return_value = "my_iso.iso"
    tested_allocator = allocator.Allocator(macs, gpu1, manager, "sasha",
                                           max_vms=1, paravirt_device="eth0",
                                           sol_base_port=5000)
    assert tested_allocator.sol_used_ports == []
    vm1 = await tested_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                             base_image_size=10,
                                             networks=["bridge"], num_cpus=2,
                                             num_gpus=1)
    assert tested_allocator.sol_used_ports == [5000]
    assert len(tested_allocator.gpus_list) == len(gpu1) - 1
    assert len(tested_allocator.mac_addresses) == len(macs) - 1

    # Destroying the VM must return its resources to the pools
    await tested_allocator.destroy_vm(vm1.name)
    assert tested_allocator.sol_used_ports == []
    assert len(tested_allocator.gpus_list) == len(gpu1)
    assert len(tested_allocator.mac_addresses) == len(macs)

    # A failure mid-allocation must also return the resources
    with mock.patch.object(vm.VM, '__init__', Exception("boom!")):
        try:
            await tested_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                               base_image_size=10,
                                               networks=["bridge"],
                                               num_cpus=2, num_gpus=1)
        except Exception:
            pass
    assert tested_allocator.sol_used_ports == []
    assert len(tested_allocator.gpus_list) == len(gpu1)
    assert len(tested_allocator.mac_addresses) == len(macs)
async def test_vm_allocate(mock_libvirt, mock_image_store, aiohttp_client,
                           loop, mock_nbd_provisioner, mock_cloud_init,
                           mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    mock_cloud_init.generate_iso.return_value = "/tmp/iso_path"
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    manager = vm_manager.VMManager(loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=1000)
    # Note that we need this hack in order not to bind to a real port
    alloc._reserve_free_port = lambda x: x

    app = web.Application()
    rest.HyperVisor(alloc, image_store, app)
    client = await aiohttp_client(app)
    resp = await client.post("/vms",
                             json={
                                 "base_image": "base.qcow",
                                 "ram": 100,
                                 "num_cpus": 1,
                                 "networks": ['bridge'],
                                 "num_gpus": 1,
                                 "disks": []
                             })
    assert resp.status == 200
    assert len(alloc.vms) == 1
    assert 'sasha-vm-0' in alloc.vms
async def test_delete_machines_on_start(event_loop, mock_libvirt,
                                        mock_image_store,
                                        mock_nbd_provisioner, mock_cloud_init,
                                        mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(2)
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=5000)
    existing_vms = [
        vm.VM(name="name1", num_cpus=1, memsize=1, net_ifaces=[], sol_port=2,
              base_image='image').json,
        vm.VM(name="name2", num_cpus=11, memsize=11, net_ifaces=[],
              sol_port=22, base_image='image').json
    ]
    mock_libvirt.load_lab_vms.return_value = existing_vms
    await alloc.delete_all_dangling_vms()
    # Now let's make sure libvirt was called to destroy the VMs
    mock_libvirt.kill_by_name.assert_has_calls(
        [mock.call("name1"), mock.call("name2")])
async def test_allocate_machine_happy_case(event_loop, mock_libvirt,
                                           mock_image_store,
                                           mock_nbd_provisioner,
                                           mock_cloud_init,
                                           mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    mock_cloud_init.generate_iso.return_value = "my_iso.iso"
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=10,
                             networks=["bridge"], num_cpus=2, num_gpus=1)
    assert len(tested.vms) == 1
    vm = tested.vms['sasha-vm-0']
    _verify_vm_valid(tested, vm,
                     expected_vm_name="sasha-vm-0",
                     expected_base_image="/home/sasha_king.qcow",
                     expected_gpus=gpu1,
                     expected_mem=1,
                     expected_networks=[{
                         "mac": macs[0],
                         "type": "bridge",
                         "source": "eth0",
                         "ip": "1.1.1.1"
                     }],
                     num_cpus=2,
                     base_image_size=10)
    mock_cloud_init.generate_iso.assert_called_with(vm)
    mock_dhcp_handler.allocate_ip.assert_called()
    mock_image_store.clone_qcow.assert_called_with("sasha_image1",
                                                   "sasha-vm-0", 10)
    mock_libvirt.define_vm.assert_called()

    # Now destroy the VM
    await tested.destroy_vm("sasha-vm-0")
    assert len(tested.vms) == 0
    assert tested.gpus_list == gpu1
    assert tested.mac_addresses == macs
    mock_image_store.delete_qcow.assert_called_with("/home/sasha_king.qcow")
    mock_libvirt.kill_by_name.assert_called_with("sasha-vm-0")
    mock_dhcp_handler.deallocate_ip.assert_called_once()

    # The freed resources must be reusable for a new allocation
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=10,
                             networks=["bridge"], num_cpus=2, num_gpus=1)
    assert len(tested.vms) == 1
    await tested.destroy_vm("sasha-vm-0")
    assert len(tested.vms) == 0
async def test_concurrent_allocation_and_free_resources(
        event_loop, mock_libvirt, mock_image_store, mock_nbd_provisioner,
        mock_cloud_init, mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(2)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    mock_cloud_init.generate_iso.return_value = "my_iso.iso"
    tested_allocator = allocator.Allocator(macs, gpu1, manager, "sasha",
                                           max_vms=1, paravirt_device="eth0",
                                           sol_base_port=5000)
    # Check that creating too many VMs concurrently raises; only one of the
    # six concurrent allocations may succeed
    with pytest.raises(NotEnoughResourceException):
        await asyncio.gather(*[
            tested_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                         base_image_size=10,
                                         networks=["bridge"], num_cpus=2,
                                         num_gpus=1) for _ in range(6)
        ])
    assert len(tested_allocator.vms) == 1

    await tested_allocator.destroy_vm("sasha-vm-0")
    assert len(tested_allocator.vms) == 0
    assert tested_allocator.gpus_list == gpu1
    assert set(tested_allocator.mac_addresses) == set(macs)

    with mock.patch.object(vm.VM, '__init__', Exception("boom!")):
        with pytest.raises(Exception):
            await tested_allocator.allocate_vm("sasha_image1", memory_gb=1,
                                               base_image_size=10,
                                               networks=["bridge"],
                                               num_cpus=2, num_gpus=1)
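# The gather above is only deterministic if allocate_vm reserves all of its
# resources synchronously, with no await between the capacity check and the
# pops from the free pools, so concurrent calls can never race on the same
# MAC, GPU or SOL port. A sketch of that reservation pattern (an assumption
# about how Allocator behaves internally, not its actual code):


def _reserve_resources_sketch(self, num_gpus, num_macs):
    # Runs atomically on the event loop: take everything or raise, so a
    # concurrent caller observes either the full reservation or none of it.
    if len(self.vms) >= self.max_vms:
        raise NotEnoughResourceException("Too many VMs")
    if num_gpus > len(self.gpus_list) or num_macs > len(self.mac_addresses):
        raise NotEnoughResourceException("Out of GPUs or MAC addresses")
    gpus = [self.gpus_list.pop() for _ in range(num_gpus)]
    macs = [self.mac_addresses.pop() for _ in range(num_macs)]
    return gpus, macs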
async def test_vm_list(mock_libvirt, mock_image_store, aiohttp_client, loop,
                       mock_nbd_provisioner, mock_cloud_init,
                       mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    mock_libvirt.status.return_value = 'on'
    mock_cloud_init.generate_iso.return_value = "/tmp/iso_path"
    mock_dhcp_handler.allocate_ip = mock.AsyncMock(return_value="1.1.1.1")
    manager = vm_manager.VMManager(loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    alloc = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                paravirt_device="eth0", sol_base_port=1000)
    with mock.patch("uuid.uuid4") as uuid4:
        uuid4.return_value = "uuid"
        with mock.patch("socket.socket"):
            await alloc.allocate_vm("sasha_image1", base_image_size=20,
                                    memory_gb=1, networks=["bridge"],
                                    num_cpus=2, num_gpus=1)
    assert len(alloc.vms) == 1
    assert 'sasha-vm-0' in alloc.vms

    app = web.Application()
    rest.HyperVisor(alloc, image_store, app)
    client = await aiohttp_client(app)
    resp = await client.get("/vms")
    vms = await resp.json()
    assert vms == {
        'vms': [{
            'name': 'sasha-vm-0',
            'uuid': 'uuid',
            'num_cpus': 2,
            'memsize': 1,
            'net_ifaces': [{
                'ip': '1.1.1.1',
                'macaddress': '00:00:00:00:00:00',
                'mode': 'bridge',
                'source': 'eth0'
            }],
            'pcis': ["0:0:0.0"],
            'api_version': 'v1',
            'base_image': 'sasha_image1',
            'base_image_size': 20,
            'image': '/home/sasha_king.qcow',
            'disks': [],
            'status': 'on',
            'sol_port': 1000,
            'cloud_init_iso': '/tmp/iso_path',
            'allocation_id': None,
            'requestor': None,
            'password': '******',
            'user': '******'
        }]
    }
vmm = libvirt_wrapper.LibvirtWrapper(args.qemu_uri)
_check_network_interface_up(args.paravirt_net_device)
_check_libvirt_network_is_up(vmm, args.private_net)
storage = image_store.ImageStore(loop,
                                 base_qcow_path=args.images_dir,
                                 run_qcow_path=args.run_dir,
                                 ssd_path=args.ssd_dir,
                                 hdd_path=args.hdd_dir)
gpu_pci_devices = config['pci']
_vfio_bind_pci_devices(config['pci'])
nbd_driver = libstorage.NBDProvisioner()
nbd_driver.initialize()
vm_boot_init = cloud_init.CloudInit(args.run_dir)
bridged_dhcp = dhcp_handlers.DHCPRequestor(args.paravirt_net_device, loop)
nat_dhcp = dhcp_handlers.LibvirtDHCPAllocator(loop, vmm, args.private_net)
dhcp_client = dhcp_handlers.DHCPManager(handlers={'bridge': bridged_dhcp,
                                                  'isolated': nat_dhcp})
manager = vm_manager.VMManager(loop, vmm, storage, nbd_driver, vm_boot_init,
                               dhcp_client)
alloc = allocator.Allocator(mac_addresses=config['macs'],
                            gpus_list=gpu_pci_devices,
                            vm_manager=manager,
                            server_name=args.server_name,
                            max_vms=args.max_vms,
                            private_network=args.private_net,
                            paravirt_device=args.paravirt_net_device,
                            sol_base_port=args.sol_port)
if args.vms_restore:
    loop.run_until_complete(alloc.restore_vms())
else:
    loop.run_until_complete(alloc.delete_all_dangling_vms())

app = web.Application()
app["info"] = dict(alias=f'{args.server_name}-hypervisor',
                   rm_type='hypervisor',
                   endpoint=f'{ip.get_ip()}:{args.port}')
if args.provisioner is not None:
    app["provisioner_address"] = args.provisioner
app.on_startup.append(start_daemons)
rest.HyperVisor(alloc, storage, app)
web.run_app(app, port=args.port,
            access_log_format='%a %t "%r" time %Tf sec %s')
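# A hypothetical invocation of this entry point, assuming argparse flags
# that mirror the args.* attributes used above (the flag spellings are
# guesses, not taken from the real parser):
#
#   python hypervisor.py \
#       --qemu-uri qemu:///system \
#       --paravirt-net-device eth0 \
#       --private-net default \
#       --images-dir /images --run-dir /run/lab \
#       --ssd-dir /ssd --hdd-dir /hdd \
#       --server-name sasha --max-vms 4 \
#       --sol-port 5000 --port 8080 \
#       --vms-restore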
async def test_allocate_machine_with_disks(event_loop, mock_libvirt,
                                           mock_image_store,
                                           mock_nbd_provisioner,
                                           mock_cloud_init,
                                           mock_dhcp_handler):
    gpu1 = _generate_device(1)
    macs = _generate_macs(1)
    mock_image_store.clone_qcow = mock.AsyncMock(
        return_value="/home/sasha_king.qcow")
    mock_image_store.create_qcow = mock.AsyncMock(
        side_effect=["/home/disk1.qcow", "/home/disk2.qcow"])
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    tested = allocator.Allocator(macs, gpu1, manager, "sasha", max_vms=1,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", base_image_size=None, memory_gb=1,
                             networks=["bridge"], num_cpus=2, num_gpus=1,
                             disks=[{
                                 "type": "ssd",
                                 "size": 10,
                                 "fs": "xfs"
                             }, {
                                 "type": "hdd",
                                 "size": 5,
                                 "fs": "ext4"
                             }])
    assert len(tested.vms) == 1
    vm = tested.vms['sasha-vm-0']
    ssd = _find_disks_by_type(vm, "ssd")[0]
    hdd = _find_disks_by_type(vm, "hdd")[0]
    _verify_vm_valid(tested, vm,
                     expected_vm_name="sasha-vm-0",
                     expected_base_image="/home/sasha_king.qcow",
                     expected_gpus=gpu1,
                     expected_mem=1,
                     expected_networks=[{
                         "mac": macs[0],
                         "type": "bridge",
                         "source": "eth0"
                     }],
                     num_cpus=2,
                     disks=[ssd, hdd])
    provision_calls = [
        call(ssd['image'], 'xfs', ssd['serial']),
        call(hdd['image'], 'ext4', hdd['serial'])
    ]
    mock_nbd_provisioner.provision_disk.assert_has_calls(provision_calls,
                                                         any_order=True)
    mock_image_store.clone_qcow.assert_called_with("sasha_image1",
                                                   "sasha-vm-0", None)
    mock_libvirt.define_vm.assert_called()

    # Now destroy the VM
    await tested.destroy_vm("sasha-vm-0")
    assert len(tested.vms) == 0
    assert tested.gpus_list == gpu1
    assert tested.mac_addresses == macs
    # One delete for boot, one for the SSD and one for the HDD
    assert mock_image_store.delete_qcow.call_count == 3
    deleted_images = set(
        [c[0][0] for c in mock_image_store.delete_qcow.call_args_list])
    assert deleted_images == set(
        ["/home/sasha_king.qcow", "/home/disk1.qcow", "/home/disk2.qcow"])
    mock_libvirt.kill_by_name.assert_called_with("sasha-vm-0")
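# _find_disks_by_type, used above, is presumably a small filter over the
# VM's disk descriptors, each of which carries the type, image path, fs and
# serial asserted on above (a sketch; the real helper may differ):


def _find_disks_by_type(vm, disk_type):
    return [disk for disk in vm.disks if disk['type'] == disk_type]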
async def test_allocate_multiple(event_loop, mock_libvirt, mock_image_store,
                                 mock_nbd_provisioner, mock_cloud_init,
                                 mock_dhcp_handler):
    gpus = _generate_device(10)
    macs = _generate_macs(10)
    mock_image_store.clone_qcow = mock.AsyncMock(
        side_effect=["1.qcow", "2.qcow"])
    manager = vm_manager.VMManager(event_loop, mock_libvirt, mock_image_store,
                                   mock_nbd_provisioner, mock_cloud_init,
                                   mock_dhcp_handler)
    tested = allocator.Allocator(macs, gpus, manager, "sasha", max_vms=3,
                                 paravirt_device="eth0", sol_base_port=5000)
    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                             networks=["bridge"], num_cpus=2, num_gpus=2)
    assert len(tested.vms) == 1
    # Let's verify that we have 8 GPUs and 9 MACs left in the pool
    assert len(tested.gpus_list) == 8
    assert len(tested.mac_addresses) == 9

    await tested.allocate_vm("sasha_image1", memory_gb=1, base_image_size=None,
                             networks=["bridge", "isolated"], num_cpus=2,
                             num_gpus=3)
    assert len(tested.vms) == 2
    # Let's verify that we have 5 GPUs and 7 MACs left in the pool
    assert len(tested.gpus_list) == 5
    assert len(tested.mac_addresses) == 7

    # Now let's exhaust the GPUs
    with pytest.raises(NotEnoughResourceException):
        await tested.allocate_vm("sasha_image1", memory_gb=1,
                                 base_image_size=None, networks=[],
                                 num_cpus=2, num_gpus=6)
    assert len(tested.vms) == 2
    assert len(tested.gpus_list) == 5
    assert len(tested.mac_addresses) == 7

    # Let's exhaust the MACs
    with pytest.raises(NotEnoughResourceException):
        await tested.allocate_vm("sasha_image1", memory_gb=1,
                                 base_image_size=None,
                                 networks=["isolated"] * 8, num_cpus=2,
                                 num_gpus=0)
    assert len(tested.vms) == 2
    assert len(tested.gpus_list) == 5
    assert len(tested.mac_addresses) == 7