def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread failed to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=lambda: suicide())
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid
    self.set_memory_overcommit(agent_config.memory_overcommit)

    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, agent_config.image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()
    self.image_transferer = HttpNfcTransferer(
        self.vim_client, self.datastore_manager.image_datastores())
    atexit.register(self.image_manager.cleanup)
def setUp(self, connect, update, creds):
    # Create VM manager
    creds.return_value = ["username", "password"]
    self.vim_client = VimClient(auto_sync=False)
    self.vim_client.wait_for_task = MagicMock()
    self.patcher = patch("host.hypervisor.esx.vm_config.GetEnv")
    self.patcher.start()
    self.vm_manager = EsxVmManager(self.vim_client, MagicMock())
    services.register(ServiceName.AGENT_CONFIG, MagicMock())

    # Set up test files
    self.base_dir = os.path.dirname(__file__)
    self.test_dir = os.path.join(self.base_dir, "../../test_files")
    self.image_manager = EsxImageManager(MagicMock(), MagicMock())
    self.image_scanner = DatastoreImageScanner(self.image_manager,
                                               self.vm_manager,
                                               self.DATASTORE_ID)
    self.write_count = 0
def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread failed to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    errback = lambda: suicide()
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=errback)
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid

    # Enable/Disable large page support. If this host is removed
    # from the deployment, large page support will need to be
    # explicitly updated by the user.
    disable_large_pages = agent_config.memory_overcommit > 1.0
    self.vim_client.set_large_page_support(disable=disable_large_pages)

    image_datastores = [ds["name"] for ds in agent_config.image_datastores]
    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()
    self.image_transferer = HttpNfcTransferer(self.vim_client,
                                              image_datastores)
    atexit.register(self.image_manager.cleanup)
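
# A minimal sketch of the configuration object the constructor above
# dereferences. FakeAgentConfig is a hypothetical stand-in, not the real
# AgentConfig class; only the attribute names are taken from the code above,
# and the values shown are illustrative assumptions.
class FakeAgentConfig(object):
    wait_timeout = 10           # seconds VimClient waits per cache update (assumed)
    memory_overcommit = 1.0     # values > 1.0 disable large page support
    datastores = ["datastore1"]
    image_datastores = [{"name": "datastore1"}]  # dicts carrying a "name" key
    networks = ["VM Network"]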
class TestEsxVmManager(unittest.TestCase):

    @patch.object(VimClient, "acquire_credentials")
    @patch.object(VimClient, "update_cache")
    @patch("pysdk.connect.Connect")
    def setUp(self, connect, update, creds):
        creds.return_value = ["username", "password"]
        self.vim_client = VimClient(auto_sync=False)
        self.vim_client.wait_for_task = MagicMock()
        self.patcher = patch("host.hypervisor.esx.vm_config.GetEnv")
        self.patcher.start()
        self.vm_manager = EsxVmManager(self.vim_client, MagicMock())

    def tearDown(self):
        self.patcher.stop()
        self.vim_client.disconnect(wait=True)

    def test_power_vm_not_found(self):
        """Test that we propagate VmNotFound."""
        self.vim_client.find_by_inventory_path = MagicMock(return_value=None)
        self.assertRaises(VmNotFoundException,
                          self.vm_manager.power_on_vm, "ENOENT")

    def test_power_vm_illegal_state(self):
        """Test InvalidPowerState propagation."""
        vm_mock = MagicMock(name="vm_mock")
        self.vm_manager.vim_client.get_vm = vm_mock
        self.vim_client.wait_for_task.side_effect = \
            vim.fault.InvalidPowerState()
        self.assertRaises(VmPowerStateException,
                          self.vm_manager.power_on_vm, "foo")

    def test_power_vm_error(self):
        """Test general Exception propagation."""
        vm_mock = MagicMock(name="vm_mock")
        self.vm_manager.vim_client.get_vm = vm_mock
        self.vim_client.wait_for_task.side_effect = vim.fault.TaskInProgress
        self.assertRaises(vim.fault.TaskInProgress,
                          self.vm_manager.power_on_vm, "foo")

    def test_add_nic(self):
        """Test add nic"""
        # Two cases for add_nic:
        # * caller passes in network_id = None
        # * caller passes in the correct network_id and hostd
        #   returns the right thing from get_network.

        def _get_device(devices, controller_type):
            f = MagicMock("get_device_foo")
            f.key = 1
            return f

        self.vm_manager.vm_config.get_device = _get_device
        spec = self.vm_manager.vm_config.update_spec()
        # Caller passes None
        self.vm_manager.add_nic(spec, None)
        # Caller passes some network_id
        self.vm_manager.add_nic(spec, "Private Vlan")

    def test_create_vm_already_exist(self):
        """Test VM creation fails if VM is found"""
        vim_mock = MagicMock()
        self.vm_manager.vim_client = vim_mock
        vim_mock.find_vm = MagicMock(return_value="existing_vm")
        mock_spec = MagicMock()
        self.assertRaises(VmAlreadyExistException,
                          self.vm_manager.create_vm,
                          "existing_vm_name", mock_spec)

    def test_create_vm(self):
        """Test VM creation"""
        vim_mock = MagicMock()
        self.vm_manager.vim_client = vim_mock
        vm_folder_mock = MagicMock()
        vim_mock.vm_folder = vm_folder_mock
        root_res_pool_mock = PropertyMock(return_value="fake_rp")
        type(vim_mock).root_resource_pool = root_res_pool_mock
        vim_mock.get_vm_in_cache = MagicMock(return_value=None)
        vm_folder_mock.CreateVm.return_value = "fake-task"

        mock_spec = MagicMock()
        mock_spec.files.vmPathName = "[] /vmfs/volumes/ds/vms"
        self.vm_manager.create_vm("fake_vm_id", mock_spec)

        vim_mock.get_vm_in_cache.assert_called_once_with("fake_vm_id")
        vm_folder_mock.CreateVm.assert_called_once_with(
            mock_spec, 'fake_rp', None)
        vim_mock.wait_for_task.assert_called_once_with("fake-task")

    @staticmethod
    def _validate_spec_extra_config(spec, config, expected):
        """Validates the config entries against the created config spec.

        When expected=True, returns True iff all the entries in config are
        found in the config spec's extraConfig.
        When expected=False, returns True iff none of the entries in config
        are found in the config spec's extraConfig.
        """
        for k, v in config.items():
            in_spec = any((x.key == k and x.value == v)
                          for x in spec.extraConfig)
            if in_spec is not expected:
                return False
        return True

    def _create_vm_spec(self, metadata, env):
        """Test VM spec creation"""
        flavor = Flavor("default", [
            QuotaLineItem("vm.memory", "256", Unit.MB),
            QuotaLineItem("vm.cpu", "1", Unit.COUNT),
        ])

        create_spec_mock = MagicMock(
            wraps=self.vm_manager.vm_config.create_spec)
        self.vm_manager.vm_config.create_spec = create_spec_mock

        spec = self.vm_manager.create_vm_spec(
            "vm_id", "ds1", flavor, metadata, env)
        create_spec_mock.assert_called_once_with(
            "vm_id", "ds1", 256, 1, metadata, env)

        return spec

    def test_create_vm_spec(self):
        metadata = {
            "configuration": {},
            "parameters": [
                {"name": "bios.bootOrder"}
            ]
        }
        extra_config_metadata = {}
        non_extra_config_metadata = {"scsi0.virtualDev": "lsisas1068",
                                     "bogus": "1"}
        metadata["configuration"].update(extra_config_metadata)
        metadata["configuration"].update(non_extra_config_metadata)
        env = {"disallowed_key": "x",
               "bios.bootOrder": "x"}

        spec = self._create_vm_spec(metadata, env)

        expected_extra_config = extra_config_metadata.copy()
        expected_extra_config["bios.bootOrder"] = "x"

        self.assertTrue(TestEsxVmManager._validate_spec_extra_config(
            spec, config=expected_extra_config, expected=True))
        self.assertTrue(TestEsxVmManager._validate_spec_extra_config(
            spec, config=non_extra_config_metadata, expected=False))
        assert_that(spec.flags.diskUuidEnabled, equal_to(True))

    def test_customize_vm_with_metadata(self):
        metadata = {
            "configuration": {
                "annotation": "fake_annotation",
                "serial0.fileType": "network",
                "serial0.yieldOnMsrRead": "TRUE",
                "serial0.network.endPoint": "server"
            },
            "parameters": [
                {"name": "serial0.fileName"},
                {"name": "serial0.vspc"}
            ]
        }
        env = {
            "serial0.fileName": "vSPC.py",
            "serial0.vspc": "telnet://1.2.3.4:17000",
        }

        spec = self._create_vm_spec(metadata, env)
        self.vm_manager.customize_vm(spec)

        assert_that(spec.annotation, equal_to("fake_annotation"))

        backing = spec.deviceChange[0].device.backing
        assert_that(
            backing,
            instance_of(vim.vm.device.VirtualSerialPort.URIBackingInfo))
        assert_that(backing.serviceURI, equal_to('vSPC.py'))
        assert_that(backing.proxyURI, equal_to('telnet://1.2.3.4:17000'))
        assert_that(backing.direction, equal_to('server'))

    @staticmethod
    def _summarize_controllers_in_spec(cfg_spec, base_type, expected_type):
        num_scsi_adapters_matching_expected_type = 0
        num_scsi_adapters_not_matching_expected_type = 0

        for dev_change in cfg_spec.deviceChange:
            dev = dev_change.device
            if isinstance(dev, expected_type):
                num_scsi_adapters_matching_expected_type += 1
            elif (isinstance(dev, base_type) and
                  not isinstance(dev, expected_type)):
                num_scsi_adapters_not_matching_expected_type += 1
        return (num_scsi_adapters_matching_expected_type,
                num_scsi_adapters_not_matching_expected_type)

    @parameterized.expand([
        ("lsilogic", vim.vm.device.VirtualLsiLogicController),
        ("lsisas1068", vim.vm.device.VirtualLsiLogicSASController),
        ("pvscsi", vim.vm.device.ParaVirtualSCSIController),
        ("buslogic", vim.vm.device.VirtualBusLogicController)
    ])
    def test_customize_disk_adapter_type(self, ctlr_type_value,
                                         expected_ctlr_type):
        metadata = {
            "configuration": {"scsi0.virtualDev": ctlr_type_value}
        }
        spec = self._create_vm_spec(metadata, {})

        ds = "fake_ds"
        disk_id = str(uuid.uuid4())
        parent_disk_id = str(uuid.uuid4())
        capacity_mb = 1024

        self.vm_manager.create_child_disk(spec, ds, disk_id, parent_disk_id)
        self.vm_manager.create_empty_disk(spec, ds, disk_id, capacity_mb)

        # Check that we create only one controller of the desired type to
        # attach to both disks.
        summary = TestEsxVmManager._summarize_controllers_in_spec(
            spec, vim.vm.device.VirtualSCSIController, expected_ctlr_type)
        assert_that(summary, equal_to((1, 0)))

    @parameterized.expand([
        ("vmxnet", vim.vm.device.VirtualVmxnet),
        ("vmxnet2", vim.vm.device.VirtualVmxnet2),
        ("vmxnet3", vim.vm.device.VirtualVmxnet3),
        ("vlance", vim.vm.device.VirtualPCNet32),
        ("e1000", vim.vm.device.VirtualE1000),
        ("e1000e", vim.vm.device.VirtualE1000e),
    ])
    @patch.object(VimClient, "get_network")
    def test_customize_nic_adapter_type(self, ctlr_type_value,
                                        expected_ctlr_type,
                                        mock_get_network):
        metadata = {
            "configuration": {"ethernet0.virtualDev": ctlr_type_value}
        }
        spec = self._create_vm_spec(metadata, {})

        fake_network = MagicMock()
        fake_network.name = "fake_network_name"
        mock_get_network.return_value = fake_network

        self.vm_manager.add_nic(spec, "fake_network_id")

        summary = TestEsxVmManager._summarize_controllers_in_spec(
            spec, vim.vm.device.VirtualEthernetCard, expected_ctlr_type)
        assert_that(summary, equal_to((1, 0)))

    @parameterized.expand([
        ('a.txt', 'Stray file: a.txt'),
        ('b.vmdk', 'Stray disk (possible data leak): b.vmdk')
    ])
    @patch.object(os.path, "isdir", return_value=True)
    @patch.object(os.path, "islink", return_value=False)
    @patch.object(shutil, "rmtree")
    def test_ensure_directory_cleanup(self, stray_file, expected,
                                      rmtree, islink, isdir):
        """Test cleanup of stray vm directory"""
        self.vm_manager._logger = MagicMock()

        with patch.object(os, "listdir", return_value=[stray_file]):
            self.vm_manager._ensure_directory_cleanup(
                "/vmfs/volumes/fake/vm_vm_foo")
            rmtree.assert_called_once_with("/vmfs/volumes/fake/vm_vm_foo")
            self.vm_manager._logger.info.assert_called_once_with(expected)
            self.vm_manager._logger.warning.assert_called_once_with(
                "Force delete vm directory /vmfs/volumes/fake/vm_vm_foo")

    def test_delete_vm(self):
        """Test deleting a VM"""
        runtime = MagicMock()
        runtime.powerState = "poweredOff"
        vm = MagicMock()
        vm.runtime = runtime
        self.vm_manager.vim_client.get_vm = MagicMock(return_value=vm)
        self.vm_manager.vm_config.get_devices = MagicMock(return_value=[])

        self.vm_manager.get_vm_path = MagicMock()
        self.vm_manager.get_vm_path.return_value = "[fake] vm_foo/xxyy.vmx"
        self.vm_manager.get_vm_datastore = MagicMock()
        self.vm_manager.get_vm_datastore.return_value = "fake"
        self.vm_manager._ensure_directory_cleanup = MagicMock()

        self.vm_manager.delete_vm("vm_foo")

        self.vm_manager._ensure_directory_cleanup.assert_called_once_with(
            "/vmfs/volumes/fake/vm_vm_foo")

    @parameterized.expand([
        ("poweredOn"), ("suspended")
    ])
    def test_delete_vm_wrong_state(self, state):
        runtime = MagicMock()
        runtime.powerState = state
        vm = MagicMock()
        vm.runtime = runtime
        self.vm_manager.vim_client.get_vm = MagicMock(return_value=vm)
        self.assertRaises(VmPowerStateException,
                          self.vm_manager.delete_vm, "vm_foo")

    def test_add_vm_disk(self):
        """Test adding VM disk"""
        self.vm_manager.vim_client.get_vm = MagicMock()
        self.vm_manager.vm_config.get_devices = MagicMock(return_value=[
            DEFAULT_DISK_CONTROLLER_CLASS(key=1000)
        ])
        info = FakeConfigInfo()
        spec = self.vm_manager.vm_config.update_spec()

        self.vm_manager.add_disk(spec, "ds1", "vm_foo", info)

    def test_used_memory(self):
        self.vm_manager.vim_client.get_vms_in_cache = MagicMock(return_value=[
            VmCache(memory_mb=1024),
            VmCache(),
            VmCache(memory_mb=2048)
        ])
        memory = self.vm_manager.get_used_memory_mb()
        self.assertEqual(memory, 2048 + 1024)

    # Disabled: the "atest" prefix keeps unittest from collecting this test.
    def atest_remove_vm_disk(self):
        """Test removing VM disk"""
        datastore = "ds1"
        disk_id = "foo"
        self.vm_manager.vim_client.get_vm = MagicMock()
        self.vm_manager.vm_config.get_devices = MagicMock(return_value=[
            vim.vm.device.VirtualLsiLogicController(key=1000),
            self.vm_manager.vm_config.create_disk_spec(datastore, disk_id)
        ])
        info = FakeConfigInfo()
        self.vm_manager.remove_disk("vm_foo", datastore, disk_id, info)

    # Disabled: the "btest" prefix keeps unittest from collecting this test.
    def btest_remove_vm_disk_enoent(self):
        """Test removing VM disk that isn't attached"""
        self.vm_manager.vim_client.get_vm = MagicMock()
        self.vm_manager.vm_config.get_devices = MagicMock(return_value=[
            self.vm_manager.vm_config.create_disk_spec("ds1", "foo")
        ])
        self.assertRaises(vim.fault.DeviceNotFound,
                          self.vm_manager.remove_disk,
                          "vm_foo", "ds1", "bar")

    def test_check_ip_v4(self):
        """Test ipv4 address validation"""
        self.assertTrue(NetUtil.is_ipv4_address("1.2.3.4"))
        self.assertFalse(NetUtil.is_ipv4_address(
            "FE80:0000:0000:0000:0202:B3FF:FE1E:8329"))
        self.assertFalse(NetUtil.is_ipv4_address("InvalidAddress"))

    def test_check_prefix_len_to_netmask_conversion(self):
        """Check the conversion from prefix length to netmask"""
        self.assertEqual(NetUtil.prefix_len_to_mask(32), "255.255.255.255")
        self.assertEqual(NetUtil.prefix_len_to_mask(0), "0.0.0.0")
        self.assertRaises(ValueError, NetUtil.prefix_len_to_mask, 33)
        self.assertEqual(NetUtil.prefix_len_to_mask(23), "255.255.254.0")
        self.assertEqual(NetUtil.prefix_len_to_mask(6), "252.0.0.0")
        self.assertEqual(NetUtil.prefix_len_to_mask(32), "255.255.255.255")

    def test_get_vm_network_guest_info(self):
        """
        Tests the guest vm network info, without the vmx returned info.

        Test 1: Only mac address info available.
        Test 2: Only mac + ipv4 address available.
        Test 3: Only mac + ipv6 address available.
        Test 4: Only mac + ipv6, ipv4 address available.
        Test 5: No mac or ipv4 address available.
        Test 6: Real Vim NicInfo object, to validate python typing.
        """
        sample_mac_address = "00:0c:29:00:00:01"
        sample_ip_address = "127.0.0.2"
        sample_prefix_length = 24
        sample_netmask = "255.255.255.0"
        sample_ipv6_address = "FE80:0000:0000:0000:0202:B3FF:FE1E:8329"
        sample_network = "VM Network"

        def _get_v4_address():
            ip_address = MagicMock(name="ipv4address")
            ip_address.ipAddress = sample_ip_address
            ip_address.prefixLength = sample_prefix_length
            return ip_address

        def _get_v6_address():
            ip_address = MagicMock(name="ipv6address")
            ip_address.ipAddress = sample_ipv6_address
            ip_address.prefixLength = sample_prefix_length
            return ip_address

        def _guest_info_1():
            """Only have the mac address."""
            net = MagicMock(name="guest_info_1")
            net.macAddress = sample_mac_address
            net.connected = True
            net.network = None
            return net

        def _guest_info_2():
            """Have mac and ipv4 address."""
            net = MagicMock(name="guest_info_2")
            net.macAddress = sample_mac_address
            net.ipConfig.ipAddress = [_get_v4_address()]
            net.network = sample_network
            net.connected = False
            return net

        def _guest_info_3():
            """Have mac and ipv6 address."""
            net = MagicMock(name="guest_info_3")
            net.macAddress = sample_mac_address
            net.ipConfig.ipAddress = [_get_v6_address()]
            net.connected = False
            net.network = sample_network
            return net

        def _guest_info_4():
            """Have a mac and an ipv4 and an ipv6 address."""
            net = MagicMock(name="guest_info_4")
            net.macAddress = sample_mac_address
            net.network = None
            net.ipConfig.ipAddress = [_get_v6_address(), _get_v4_address()]
            net.connected = True
            return net

        def _get_vm_no_net_info(vm_id):
            """Return empty guest_info."""
            f = MagicMock(name="get_vm")
            f.config.uuid = str(uuid.uuid4())
            g = MagicMock(name="guest_info")
            f.guest = g
            g.net = []
            return f

        def _get_vm(vm_id):
            """Return a mocked up guest info object."""
            f = MagicMock(name="get_vm")
            g = MagicMock(name="guest_info")
            f.guest = g
            net = _guest_info()
            g.net = [net]
            return f

        def _get_vm_vim_guest_info(vm_id):
            """Return a real Vim object with reasonable values to validate
            python typing."""
            f = MagicMock(name="get_vm")
            f.config.uuid = str(uuid.uuid4())
            g = MagicMock(name="guest_info")
            f.guest = g
            net = vim.vm.GuestInfo.NicInfo()
            ip_config_info = vim.net.IpConfigInfo()
            net.ipConfig = ip_config_info
            net.macAddress = sample_mac_address
            net.network = sample_network
            net.connected = True
            ipAddress = vim.net.IpConfigInfo.IpAddress()
            ipAddress.ipAddress = sample_ip_address
            ipAddress.prefixLength = sample_prefix_length
            ip_config_info.ipAddress.append(ipAddress)
            g.net = [net]
            return f

        # Test 1
        _guest_info = _guest_info_1
        self.vm_manager.vim_client.get_vm = _get_vm
        self.vm_manager._get_mac_network_mapping = MagicMock(return_value={})
        network_info = self.vm_manager.get_vm_network("vm_foo1")
        expected_1 = VmNetworkInfo(mac_address=sample_mac_address,
                                   is_connected=ConnectedStatus.CONNECTED)
        self.assertEqual(network_info, [expected_1])

        # Test 2
        _guest_info = _guest_info_2
        network_info = self.vm_manager.get_vm_network("vm_foo2")
        ip_address = Ipv4Address(ip_address=sample_ip_address,
                                 netmask=sample_netmask)
        expected_2 = VmNetworkInfo(mac_address=sample_mac_address,
                                   ip_address=ip_address,
                                   network=sample_network,
                                   is_connected=ConnectedStatus.DISCONNECTED)
        self.assertEqual(network_info, [expected_2])

        # Test 3
        _guest_info = _guest_info_3
        network_info = self.vm_manager.get_vm_network("vm_foo3")
        expected_3 = VmNetworkInfo(mac_address=sample_mac_address,
                                   network=sample_network,
                                   is_connected=ConnectedStatus.DISCONNECTED)
        self.assertEqual(network_info, [expected_3])

        # Test 4
        _guest_info = _guest_info_4
        network_info = self.vm_manager.get_vm_network("vm_foo4")
        expected_4 = VmNetworkInfo(mac_address=sample_mac_address,
                                   ip_address=ip_address,
                                   is_connected=ConnectedStatus.CONNECTED)
        self.assertEqual(network_info, [expected_4])

        # Test 5
        self.vm_manager.vim_client.get_vm = _get_vm_no_net_info
        network_info = self.vm_manager.get_vm_network("vm_foo5")
        self.assertEqual(network_info, [])

        # Test 6
        self.vm_manager.vim_client.get_vm = _get_vm_vim_guest_info
        network_info = self.vm_manager.get_vm_network("vm_foo5")
        expected_6 = VmNetworkInfo(mac_address=sample_mac_address,
                                   ip_address=ip_address,
                                   network=sample_network,
                                   is_connected=ConnectedStatus.CONNECTED)
        self.assertEqual(network_info, [expected_6])

    def test_get_linked_clone_image_path(self):
        image_path = self.vm_manager.get_linked_clone_image_path

        # VM not found
        vm = MagicMock(return_value=None)
        self.vm_manager.vim_client.get_vm_in_cache = vm
        assert_that(image_path("vm1"), is_(None))

        # disks is None
        vm = MagicMock(return_value=VmCache(disks=None))
        self.vm_manager.vim_client.get_vm_in_cache = vm
        assert_that(image_path("vm1"), is_(None))

        # disks is an empty list
        vm = MagicMock(return_value=VmCache(disks=[]))
        self.vm_manager.vim_client.get_vm_in_cache = vm
        assert_that(image_path("vm1"), is_(None))

        # no image disk
        vm = MagicMock(return_value=VmCache(disks=["a", "b", "c"]))
        self.vm_manager.vim_client.get_vm_in_cache = vm
        assert_that(image_path("vm1"), is_(None))

        # image found
        image = "[ds1] image_ttylinux/ttylinux.vmdk"
        vm = MagicMock(return_value=VmCache(disks=["a", "b", image]))
        self.vm_manager.vim_client.get_vm_in_cache = vm
        assert_that(image_path("vm1"), is_(datastore_to_os_path(image)))

    def test_set_vnc_port(self):
        flavor = Flavor("default", [
            QuotaLineItem("vm.memory", "256", Unit.MB),
            QuotaLineItem("vm.cpu", "1", Unit.COUNT),
        ])
        spec = self.vm_manager.create_vm_spec("vm_id", "ds1", flavor)
        self.vm_manager.set_vnc_port(spec, 5901)

        options = [o for o in spec.extraConfig
                   if o.key == 'RemoteDisplay.vnc.enabled']
        assert_that(options[0].value, equal_to('True'))

        options = [o for o in spec.extraConfig
                   if o.key == 'RemoteDisplay.vnc.port']
        assert_that(options[0].value, equal_to(5901))

    @patch.object(VimClient, "get_vm")
    def test_get_vnc_port(self, get_vm):
        vm_mock = MagicMock()
        vm_mock.config.extraConfig = [
            vim.OptionValue(key="RemoteDisplay.vnc.port", value="5901")
        ]
        get_vm.return_value = vm_mock

        port = self.vm_manager.get_vnc_port("id")
        assert_that(port, equal_to(5901))

    def test_get_resources(self):
        """
        Test that get_resources excludes VMs/disks if it can't find their
        corresponding datastore UUIDs.
        """
        self.vm_manager.vim_client.get_vms_in_cache = MagicMock(return_value=[
            VmCache(path="vm_path_a", disks=["disk_a", "disk_b", "disk_c"]),
            VmCache(path="vm_path_b", disks=["disk_b", "disk_c", "disk_d"]),
            VmCache(path="vm_path_c", disks=["disk_c", "disk_d", "disk_e"]),
        ])

        def normalize(name):
            if name == "vm_path_b" or name == "disk_b":
                raise DatastoreNotFoundException()
            return name

        def mock_get_name(path):
            return path

        def mock_get_state(power_state):
            return State.STOPPED

        self.vm_manager._ds_manager.normalize.side_effect = normalize
        self.vm_manager._get_datastore_name_from_ds_path = mock_get_name
        self.vm_manager._power_state_to_resource_state = mock_get_state

        # vm_path_b and disk_b are not included in the get_resources response.
        resources = self.vm_manager.get_resources()
        assert_that(len(resources), equal_to(2))
        assert_that(len(resources[0].disks), equal_to(2))
        assert_that(len(resources[1].disks), equal_to(3))

    @patch.object(VimClient, "get_vms")
    def test_get_occupied_vnc_ports(self, get_vms):
        get_vms.return_value = [self._create_vm_mock(5900),
                                self._create_vm_mock(5901)]
        ports = self.vm_manager.get_occupied_vnc_ports()
        assert_that(ports, contains_inanyorder(5900, 5901))

    def _create_vm_mock(self, vnc_port):
        vm = MagicMock()
        vm.config.extraConfig = []
        vm.config.extraConfig.append(
            vim.OptionValue(key="RemoteDisplay.vnc.port",
                            value=str(vnc_port)))
        vm.config.extraConfig.append(
            vim.OptionValue(key="RemoteDisplay.vnc.enabled", value="True"))
        return vm
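
# The netmask assertions in test_check_prefix_len_to_netmask_conversion
# follow from plain bit arithmetic. Below is a minimal sketch of a
# conversion equivalent to what the tests expect; it is a hypothetical
# illustration, not the actual NetUtil.prefix_len_to_mask implementation.
def _prefix_len_to_mask_sketch(prefix_len):
    """Convert a prefix length (0-32) to a dotted-quad netmask string."""
    if not 0 <= prefix_len <= 32:
        raise ValueError("Invalid prefix length")
    # Set the top prefix_len bits of a 32-bit value, then render each octet.
    mask = (0xffffffff << (32 - prefix_len)) & 0xffffffff
    return ".".join(str((mask >> s) & 0xff) for s in (24, 16, 8, 0))

# Matches the expected test values: /23 keeps 23 leading ones (255.255.254.0)
# and /6 keeps 6 leading ones (252.0.0.0).
assert _prefix_len_to_mask_sketch(23) == "255.255.254.0"
assert _prefix_len_to_mask_sketch(6) == "252.0.0.0"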
class TestVmManager(unittest.TestCase):

    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self._logger = logging.getLogger(__name__)
        self.vim_client = VimClient(self.host, "root", self.pwd)
        self.vm_manager = EsxVmManager(self.vim_client, [])
        for vm in self.vim_client.get_vms():
            vm.Destroy()

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    @patch('os.path.exists', return_value=True)
    def test_vnc_ports(self, _exists):
        vm_id = self._vm_id()
        port = self._test_port()
        flavor = Flavor("vm", [QuotaLineItem("vm.cpu", 1, Unit.COUNT),
                               QuotaLineItem("vm.memory", 8, Unit.MB)])
        datastore = self.vim_client.get_datastore().name
        spec = self.vm_manager.create_vm_spec(vm_id, datastore, flavor)
        self.vm_manager.set_vnc_port(spec, port)
        try:
            self.vm_manager.create_vm(vm_id, spec)

            expected = self.vm_manager.get_vnc_port(vm_id)
            assert_that(expected, equal_to(port))

            ports = self.vm_manager.get_occupied_vnc_ports()
            assert_that(ports, contains(port))
        finally:
            self.vm_manager.delete_vm(vm_id)

    @patch('os.path.exists', return_value=True)
    def test_mks_ticket(self, _exists):
        vm_id = self._vm_id()
        flavor = Flavor("vm", [QuotaLineItem("vm.cpu", 1, Unit.COUNT),
                               QuotaLineItem("vm.memory", 8, Unit.MB)])
        datastore = self.vim_client.get_datastore().name
        spec = self.vm_manager.create_vm_spec(vm_id, datastore, flavor)
        try:
            self.vm_manager.create_vm(vm_id, spec)
            self.vm_manager.power_on_vm(vm_id)
            ticket = self.vm_manager.get_mks_ticket(vm_id)

            assert_that(ticket.cfg_file, not_none())
            assert_that(ticket.ticket, not_none())
        finally:
            self.vm_manager.power_off_vm(vm_id)
            self.vm_manager.delete_vm(vm_id)

    @patch('os.path.exists', return_value=True)
    def test_vminfo(self, _exists):
        self._test_vminfo({})
        self._test_vminfo({"project": "p1"})
        self._test_vminfo({"tenant": "t1"})
        self._test_vminfo({"project": "p1", "tenant": "t1"})

    def _test_vminfo(self, vminfo):
        vm_id = self._vm_id()
        flavor = Flavor("vm", [QuotaLineItem("vm.cpu", 1, Unit.COUNT),
                               QuotaLineItem("vm.memory", 8, Unit.MB)])
        datastore = self.vim_client.get_datastore().name
        spec = self.vm_manager.create_vm_spec(vm_id, datastore, flavor)
        self.vm_manager.set_vminfo(spec, vminfo)
        try:
            self.vm_manager.create_vm(vm_id, spec)
            got_metadata = self.vm_manager.get_vminfo(vm_id)
            assert_that(got_metadata, equal_to(vminfo))
        finally:
            self.vm_manager.delete_vm(vm_id)

    def _vm_id(self):
        vm_id = strftime("%Y-%m-%d-%H%M%S-", localtime())
        vm_id += str(random.randint(1, 10000))
        return vm_id

    def _test_port(self):
        return 5907
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer VMDKs
    efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream-optimized format needed by
    ImportVApp.
    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _create_remote_vim_client(self, agent_client, host):
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host, ticket=response.vim_ticket,
                               auto_sync=False)
        return vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):
        # URLs from vApp export/import leases use '*' as a placeholder for
        # the host name; it has to be replaced with the actual host on which
        # the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self, shadow_vm_id):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently
        associated with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _create_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not
        directly accessible from this host.
        """
        shadow_vm_id = SHADOW_VM_NAME_PREFIX + str(uuid.uuid4())
        spec = self._vm_config.create_spec(
            vm_id=shadow_vm_id, datastore=self._get_shadow_vm_datastore(),
            memory=32, cpus=1)
        try:
            self._vm_manager.create_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" %
                                   shadow_vm_id)
            raise
        return shadow_vm_id

    def _delete_shadow_vm(self, shadow_vm_id):
        try:
            # Detach the disk so it is not deleted along with the vm.
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.update_vm(shadow_vm_id, spec)

            # Delete the vm.
            self._vm_manager.delete_vm(shadow_vm_id, force=True)
        except Exception:
            self._logger.exception("Error deleting vm with id %s" %
                                   shadow_vm_id)

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore,
                                       shadow_vm_id):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.add_disk(spec, image_datastore, image_id, info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore,
                                         shadow_vm_id):
        """ Obtain a handle to the streamOptimized disk from the shadow vm.

        The stream-optimized disk is obtained by configuring the shadow VM
        with the image disk we are interested in and exporting the
        reconfigured shadow VM.
        """
        self._configure_shadow_vm_with_disk(image_id, image_datastore,
                                            shadow_vm_id)
        lease, disk_url = self._export_shadow_vm(shadow_vm_id)
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _prepare_receive_image(self, agent_client, image_id, datastore):
        request = PrepareReceiveImageRequest(image_id, datastore)
        response = agent_client.prepare_receive_image(request)
        if response.result != PrepareReceiveImageResultCode.OK:
            err_msg = ("Failed to prepare receive image. Response = %s" %
                       str(response))
            self._logger.info(err_msg)
            raise ValueError(err_msg)
        return response.import_vm_path, response.import_vm_id

    def _create_import_vm_spec(self, vm_id, datastore, vm_path):
        spec = EsxVmConfigSpec(vm_id, "otherGuest", 32, 1, vm_path, None)
        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec, datastore, None,
                                                  size_mb=1)
        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client, image_id,
                                         destination_datastore,
                                         imported_vm_name, metadata):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to the destination host.
        """
        request = ReceiveImageRequest(
            image_id=image_id,
            datastore_id=destination_datastore,
            transferred_image_id=imported_vm_name,
            metadata=metadata)
        response = agent_client.receive_image(request)
        if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST:
            raise DiskAlreadyExistException(response.error)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    def _read_metadata(self, image_datastore, image_id):
        try:
            # Transfer raw metadata
            metadata_path = os_metadata_path(image_datastore, image_id,
                                             IMAGE_FOLDER_NAME_PREFIX)
            metadata = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = f.read()
            return metadata
        except Exception:
            self._logger.exception("Failed to read metadata")
            raise

    def _send_image(self, agent_client, host, tmp_path, spec):
        vim_client = self._create_remote_vim_client(agent_client, host)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(vim_client,
                                                                 spec)
            try:
                self.upload_file(tmp_path, disk_url, write_lease)
            finally:
                write_lease.Complete()
        finally:
            vim_client.disconnect()

    @lock_non_blocking
    def send_image_to_host(self, image_id, image_datastore,
                           destination_image_id, destination_datastore,
                           host, port):
        if destination_image_id is None:
            destination_image_id = image_id
        metadata = self._read_metadata(image_datastore, image_id)

        shadow_vm_id = self._create_shadow_vm()

        # Place transfer.vmdk under the shadow vm's directory to work around
        # VSAN's restriction on files at the datastore top level.
        shadow_vm_path = os_datastore_path(
            self._get_shadow_vm_datastore(),
            compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
        transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
        self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)

        agent_client = None
        try:
            read_lease, disk_url = self._get_image_stream_from_shadow_vm(
                image_id, image_datastore, shadow_vm_id)
            try:
                self.download_file(disk_url, transfer_vmdk_path, read_lease)
            finally:
                read_lease.Complete()

            agent_client = DirectClient("Host", Host.Client, host, port)
            agent_client.connect()

            vm_path, vm_id = self._prepare_receive_image(
                agent_client, destination_image_id, destination_datastore)
            spec = self._create_import_vm_spec(vm_id, destination_datastore,
                                               vm_path)

            self._send_image(agent_client, host, transfer_vmdk_path, spec)
            self._register_imported_image_at_host(
                agent_client, destination_image_id, destination_datastore,
                vm_id, metadata)
            return vm_id
        finally:
            try:
                os.unlink(transfer_vmdk_path)
            except OSError:
                pass
            self._delete_shadow_vm(shadow_vm_id)
            rm_rf(shadow_vm_path)
            if agent_client:
                agent_client.close()
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer VMDKs
    efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.
    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host, ticket=response.vim_ticket,
                               auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
        return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):
        # URLs from vApp export/import leases have '*' as a placeholder for
        # the host name, which has to be replaced with the actual host on
        # which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently
        associated with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host if absent.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not
        directly accessible from this host.
""" vm_id = self._shadow_vm_id if self._vm_manager.has_vm(vm_id): self._logger.debug("shadow vm exists") return spec = self._vm_config.create_spec( vm_id=vm_id, datastore=self._get_shadow_vm_datastore(), memory=32, cpus=1) try: self._vm_manager.create_vm(vm_id, spec) except Exception: self._logger.exception("Error creating vm with id %s" % vm_id) raise def _configure_shadow_vm_with_disk(self, image_id, image_datastore): """ Reconfigures the shadow vm to contain only one image disk. """ try: spec = self._vm_manager.update_vm_spec() info = self._vm_manager.get_vm_config(self._shadow_vm_id) self._vm_manager.remove_all_disks(spec, info) self._vm_manager.add_disk(spec, image_datastore, image_id, info, disk_is_image=True) self._vm_manager.update_vm(self._shadow_vm_id, spec) except Exception: self._logger.exception( "Error configuring shadow vm with image %s" % image_id) raise def _get_image_stream_from_shadow_vm(self, image_id, image_datastore): """ Obtain a handle to the streamOptimized disk from shadow vm. The stream-optimized disk is obtained via configuring a shadow VM with the image disk we are interested in and exporting the reconfigured shadow VM. """ self._ensure_shadow_vm() self._configure_shadow_vm_with_disk(image_id, image_datastore) lease, disk_url = self._export_shadow_vm() disk_url = self._ensure_host_in_url(disk_url, self._lease_url_host_name) return lease, disk_url def _create_import_vm_spec(self, image_id, datastore): vm_name = "h2h_%s" % str(uuid.uuid4()) spec = self._vm_config.create_spec_for_import(vm_id=vm_name, image_id=image_id, datastore=datastore, memory=32, cpus=1) # Just specify a tiny capacity in the spec for now; the eventual vm # disk will be based on what is uploaded via the http nfc url. spec = self._vm_manager.create_empty_disk(spec, datastore, None, size_mb=1) import_spec = vim.vm.VmImportSpec(configSpec=spec) return import_spec def _get_url_from_import_vm(self, dst_vim_client, import_spec): vm_folder = dst_vim_client.vm_folder root_rp = dst_vim_client.root_resource_pool lease = root_rp.ImportVApp(import_spec, vm_folder) self._wait_for_lease(lease) disk_url = self._get_disk_url_from_lease(lease) disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host) return lease, disk_url def _register_imported_image_at_host(self, agent_client, image_id, destination_datastore, imported_vm_name, metadata, manifest): """ Installs an image at another host. Image data was transferred via ImportVApp to said host. 
""" request = ReceiveImageRequest( image_id=image_id, datastore_id=destination_datastore, transferred_image_id=imported_vm_name, metadata=metadata, manifest=manifest, ) response = agent_client.receive_image(request) if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST: raise DiskAlreadyExistException(response.error) if response.result != ReceiveImageResultCode.OK: raise ReceiveImageException(response.result, response.error) def _read_metadata(self, image_datastore, image_id): try: # Transfer raw manifest manifest_path = os_image_manifest_path(image_datastore, image_id) with open(manifest_path) as f: manifest = f.read() # Transfer raw metadata metadata_path = os_metadata_path(image_datastore, image_id, IMAGE_FOLDER_NAME) metadata = None if os.path.exists(metadata_path): with open(metadata_path, 'r') as f: metadata = f.read() return manifest, metadata except: self._logger.exception("Failed to read metadata") raise @lock_non_blocking def send_image_to_host(self, image_id, image_datastore, destination_image_id, destination_datastore, host, port, intermediate_file_path=None): manifest, metadata = self._read_metadata(image_datastore, image_id) read_lease, disk_url = self._get_image_stream_from_shadow_vm( image_id, image_datastore) # Save stream-optimized disk to a unique path locally for now. # TODO(vui): Switch to chunked transfers to handle not knowing content # length in the full streaming mode. if intermediate_file_path: tmp_path = intermediate_file_path else: tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % ( self._get_shadow_vm_datastore(), self._shadow_vm_id) try: self.download_file(disk_url, tmp_path) finally: read_lease.Complete() if destination_image_id is None: destination_image_id = image_id spec = self._create_import_vm_spec( destination_image_id, destination_datastore) agent_client, vim_client = self._get_remote_connections(host, port) try: write_lease, disk_url = self._get_url_from_import_vm(vim_client, spec) try: self.upload_file(tmp_path, disk_url) finally: write_lease.Complete() try: os.unlink(tmp_path) except OSError: pass # TODO(vui): imported vm name should be made unique to remove # ambiguity during subsequent lookup imported_vm_name = destination_image_id self._register_imported_image_at_host( agent_client, destination_image_id, destination_datastore, imported_vm_name, metadata, manifest) finally: agent_client.close() vim_client.disconnect() return imported_vm_name
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer VMDKs
    efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.
    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host, ticket=response.vim_ticket,
                               auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
        return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):
        # URLs from vApp export/import leases have '*' as a placeholder for
        # the host name, which has to be replaced with the actual host on
        # which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently
        associated with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host if absent.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not
        directly accessible from this host.
""" vm_id = self._shadow_vm_id if self._vm_manager.has_vm(vm_id): self._logger.debug("shadow vm exists") return spec = self._vm_config.create_spec( vm_id=vm_id, datastore=self._get_shadow_vm_datastore(), memory=32, cpus=1) try: self._vm_manager.create_vm(vm_id, spec) except Exception: self._logger.exception("Error creating vm with id %s" % vm_id) raise def _configure_shadow_vm_with_disk(self, image_id, image_datastore): """ Reconfigures the shadow vm to contain only one image disk. """ try: spec = self._vm_manager.update_vm_spec() info = self._vm_manager.get_vm_config(self._shadow_vm_id) self._vm_manager.remove_all_disks(spec, info) self._vm_manager.add_disk(spec, image_datastore, image_id, info, disk_is_image=True) self._vm_manager.update_vm(self._shadow_vm_id, spec) except Exception: self._logger.exception( "Error configuring shadow vm with image %s" % image_id) raise def _get_image_stream_from_shadow_vm(self, image_id, image_datastore): """ Obtain a handle to the streamOptimized disk from shadow vm. The stream-optimized disk is obtained via configuring a shadow VM with the image disk we are interested in and exporting the reconfigured shadow VM. """ self._ensure_shadow_vm() self._configure_shadow_vm_with_disk(image_id, image_datastore) lease, disk_url = self._export_shadow_vm() disk_url = self._ensure_host_in_url(disk_url, self._lease_url_host_name) return lease, disk_url def _create_import_vm_spec(self, image_id, datastore): vm_name = "h2h_%s" % str(uuid.uuid4()) spec = self._vm_config.create_spec_for_import(vm_id=vm_name, image_id=image_id, datastore=datastore, memory=32, cpus=1) # Just specify a tiny capacity in the spec for now; the eventual vm # disk will be based on what is uploaded via the http nfc url. spec = self._vm_manager.create_empty_disk(spec, datastore, None, size_mb=1) import_spec = vim.vm.VmImportSpec(configSpec=spec) return import_spec def _get_url_from_import_vm(self, dst_vim_client, import_spec): vm_folder = dst_vim_client.vm_folder root_rp = dst_vim_client.root_resource_pool lease = root_rp.ImportVApp(import_spec, vm_folder) self._wait_for_lease(lease) disk_url = self._get_disk_url_from_lease(lease) disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host) return lease, disk_url def _register_imported_image_at_host(self, agent_client, image_id, destination_datastore, imported_vm_name): """ Installs an image at another host. Image data was transferred via ImportVApp to said host. """ request = ReceiveImageRequest(image_id=image_id, datastore_id=destination_datastore, transferred_image_id=imported_vm_name) response = agent_client.receive_image(request) if response.result != ReceiveImageResultCode.OK: raise ReceiveImageException(response.result, response.error) @lock_non_blocking def send_image_to_host(self, image_id, image_datastore, destination_image_id, destination_datastore, host, port, intermediate_file_path=None): read_lease, disk_url = self._get_image_stream_from_shadow_vm( image_id, image_datastore) # Save stream-optimized disk to a unique path locally for now. # TODO(vui): Switch to chunked transfers to handle not knowing content # length in the full streaming mode. 
        if intermediate_file_path:
            tmp_path = intermediate_file_path
        else:
            tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
                self._get_shadow_vm_datastore(), self._shadow_vm_id)
        try:
            self.download_file(disk_url, tmp_path)
        finally:
            read_lease.Complete()

        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(destination_image_id,
                                           destination_datastore)

        agent_client, vim_client = self._get_remote_connections(host, port)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(
                vim_client, spec)
            try:
                self.upload_file(tmp_path, disk_url)
            finally:
                write_lease.Complete()
            try:
                os.unlink(tmp_path)
            except OSError:
                pass

            # TODO(vui): imported vm name should be made unique to remove
            # ambiguity during subsequent lookup
            imported_vm_name = destination_image_id

            self._register_imported_image_at_host(agent_client,
                                                  destination_image_id,
                                                  destination_datastore,
                                                  imported_vm_name)
        finally:
            agent_client.close()
            vim_client.disconnect()

        return imported_vm_name
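# --- Standalone sketch (not part of the original source) ---
# The bounded polling pattern used by _wait_for_lease above, reduced to a
# generic helper: poll a state-producing callable once per interval until it
# leaves the initializing state or the retry budget runs out. All names here
# are illustrative only, not part of the codebase.

import time


def wait_for_state(get_state, initializing_state, retries=10, interval=1):
    state = None
    while retries > 0:
        state = get_state()
        if state != initializing_state:
            return state
        retries -= 1
        time.sleep(interval)
    raise RuntimeError("initialization timed out")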