def test_nonexistent_datastores(self, mkdir_mock):
    """Test that non-existent datastores get filtered out."""
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", True],
        ["datastore3", "/vmfs/volumes/id-3", "VMFS", True],
    ])
    hypervisor = MagicMock()
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "bad-datastore1"]
    image_ds = [
        {"name": "datastore2", "used_for_vms": True},
        {"name": "datastore3", "used_for_vms": False},
        {"name": "bad-datastores2", "used_for_vms": False},
    ]
    manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(manager.get_datastore_ids(),
                contains_inanyorder("id-1", "id-2", "id-3"))
    assert_that(manager.vm_datastores(), is_(["id-1"]))
    assert_that(manager.image_datastores(),
                contains_inanyorder("id-2", "id-3"))
    assert_that(manager.initialized, is_(True))
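# These tests rely on a get_datastore_mock helper whose definition is not
# shown here. A minimal sketch of what it might look like, assuming
# EsxDatastoreManager reads name/url/type/local attributes off each entry
# returned by vim_client.get_all_datastores():
def get_datastore_mock(self, datastores):
    result = []
    for name, url, ds_type, local in datastores:
        ds = MagicMock()
        ds.name = name      # e.g. "datastore1"
        ds.url = url        # e.g. "/vmfs/volumes/id-1"
        ds.type = ds_type   # e.g. "VMFS", "NFS", "vsan"
        ds.local = local    # True/False for VMFS, None otherwise
        result.append(ds)
    return result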
def test_single_datastore(self, mkdir_mock):
    """Test that datastore manager works with a single datastore."""
    hypervisor = MagicMock()
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        # name, url, type, local
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
    ])
    hypervisor.vim_client = vim_client

    # No valid datastore. One image datastore for cloud VMs.
    ds_list = []
    image_ds = [{"name": "datastore1", "used_for_vms": True}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(ds_manager.initialized, is_(True))
    assert_that(ds_manager.get_datastore_ids(), is_(["id-1"]))
    assert_that(ds_manager.vm_datastores(), is_([]))
    assert_that(ds_manager.image_datastores(), is_(["id-1"]))

    # No valid datastore. No image datastore for cloud VMs.
    ds_list = ["bad-ds"]
    image_ds = [{"name": "datastore1", "used_for_vms": False}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(ds_manager.initialized, is_(False))
def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread fails to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=lambda: suicide())
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid

    self.set_memory_overcommit(agent_config.memory_overcommit)

    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, agent_config.image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()
    self.image_transferer = HttpNfcTransferer(
        self.vim_client, self.datastore_manager.image_datastores())
    atexit.register(self.image_manager.cleanup)
def test_empty_url_datastores(self, mkdir_mock):
    """Test that datastores with empty url get filtered out."""
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        ["datastore1", "", "VMFS", True],
        ["datastore2", None, "VMFS", True],
        ["datastore3", "/vmfs/volumes/id-3", "VMFS", True],
    ])
    hypervisor = MagicMock()
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "datastore2", "datastore3"]
    image_ds = [{"name": "datastore3", "used_for_vms": True}]
    manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(manager.get_datastore_ids(), contains_inanyorder("id-3"))
def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread fails to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    errback = lambda: suicide()
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=errback)
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid

    self.set_memory_overcommit(agent_config.memory_overcommit)

    image_datastores = [ds["name"] for ds in agent_config.image_datastores]
    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, agent_config.image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()
    self.image_transferer = HttpNfcTransferer(self.vim_client,
                                              image_datastores)
    atexit.register(self.image_manager.cleanup)
def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread fails to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    errback = lambda: suicide()
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=errback)
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid

    # Enable/Disable large page support. If this host is removed
    # from the deployment, large page support will need to be
    # explicitly updated by the user.
    disable_large_pages = agent_config.memory_overcommit > 1.0
    self.vim_client.set_large_page_support(disable=disable_large_pages)

    image_datastores = [ds["name"] for ds in agent_config.image_datastores]
    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()

    atexit.register(self.image_manager.cleanup)
def test_nonexistent_datastores(self, mkdir_mock):
    """Test that non-existent datastores get filtered out."""
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", True],
        ["datastore3", "/vmfs/volumes/id-3", "VMFS", True],
    ])
    hypervisor = MagicMock()
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "bad-datastore1"]
    image_ds = [
        {"name": "datastore2", "used_for_vms": True},
        {"name": "datastore3", "used_for_vms": False},
        {"name": "bad-datastores2", "used_for_vms": False},
    ]
    manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(manager.get_datastore_ids(),
                is_(["id-1", "id-2", "id-3"]))
    assert_that(manager.vm_datastores(), is_(["id-1"]))
    assert_that(manager.image_datastores(), is_(["id-2", "id-3"]))
    assert_that(manager.initialized, is_(True))
def test_multiple_image_datastores(self, mkdir_mock):
    """Test that datastore manager works with multiple image datastores."""
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", True],
        ["datastore3", "/vmfs/volumes/id-3", "VMFS", True],
    ])
    hypervisor = MagicMock()
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1"]
    image_ds = [
        {"name": "datastore2", "used_for_vms": True},
        {"name": "datastore3", "used_for_vms": False},
    ]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)
    assert_that(ds_manager.get_datastore_ids(),
                is_(["id-1", "id-2", "id-3"]))
    assert_that(ds_manager.vm_datastores(), is_(["id-1"]))
    assert_that(ds_manager.image_datastores(), is_(["id-2", "id-3"]))
    assert_that(ds_manager.initialized, is_(True))
def __init__(self, agent_config):
    self.logger = logging.getLogger(__name__)

    # If VimClient's housekeeping thread fails to update its own cache,
    # call errback to commit suicide. Watchdog will bring up the agent
    # again.
    errback = lambda: suicide()
    self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                errback=errback)
    atexit.register(lambda client: client.disconnect(), self.vim_client)

    self._uuid = self.vim_client.host_uuid

    # Enable/Disable large page support. If this host is removed
    # from the deployment, large page support will need to be
    # explicitly updated by the user.
    disable_large_pages = agent_config.memory_overcommit > 1.0
    self.vim_client.set_large_page_support(disable=disable_large_pages)

    image_datastores = [ds["name"] for ds in agent_config.image_datastores]
    self.datastore_manager = EsxDatastoreManager(
        self, agent_config.datastores, image_datastores)
    # datastore manager needs to update the cache when there is a change.
    self.vim_client.add_update_listener(self.datastore_manager)
    self.vm_manager = EsxVmManager(self.vim_client, self.datastore_manager)
    self.disk_manager = EsxDiskManager(self.vim_client,
                                       self.datastore_manager)
    self.image_manager = EsxImageManager(self.vim_client,
                                         self.datastore_manager)
    self.network_manager = EsxNetworkManager(self.vim_client,
                                             agent_config.networks)
    self.system = EsxSystem(self.vim_client)
    self.image_manager.monitor_for_cleanup()
    self.image_transferer = HttpNfcTransferer(self.vim_client,
                                              image_datastores)
    atexit.register(self.image_manager.cleanup)
class EsxHypervisor(object):
    """Manage ESX Hypervisor."""

    def __init__(self, agent_config):
        self.logger = logging.getLogger(__name__)

        # If VimClient's housekeeping thread fails to update its own cache,
        # call errback to commit suicide. Watchdog will bring up the agent
        # again.
        self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                    errback=lambda: suicide())
        atexit.register(lambda client: client.disconnect(), self.vim_client)

        self._uuid = self.vim_client.host_uuid

        self.set_memory_overcommit(agent_config.memory_overcommit)

        self.datastore_manager = EsxDatastoreManager(
            self, agent_config.datastores, agent_config.image_datastores)
        # datastore manager needs to update the cache when there is a change.
        self.vim_client.add_update_listener(self.datastore_manager)
        self.vm_manager = EsxVmManager(self.vim_client,
                                       self.datastore_manager)
        self.disk_manager = EsxDiskManager(self.vim_client,
                                           self.datastore_manager)
        self.image_manager = EsxImageManager(self.vim_client,
                                             self.datastore_manager)
        self.network_manager = EsxNetworkManager(self.vim_client,
                                                 agent_config.networks)
        self.system = EsxSystem(self.vim_client)
        self.image_manager.monitor_for_cleanup()
        self.image_transferer = HttpNfcTransferer(
            self.vim_client, self.datastore_manager.image_datastores())
        atexit.register(self.image_manager.cleanup)

    @property
    def uuid(self):
        return self._uuid

    def check_image(self, image_id, datastore_id):
        return self.image_manager.check_image(
            image_id, self.datastore_manager.datastore_name(datastore_id))

    def acquire_vim_ticket(self):
        return self.vim_client.acquire_clone_ticket()

    def acquire_cgi_ticket(self, url, op):
        return self.vim_client.acquire_cgi_ticket(url, op)

    def add_update_listener(self, listener):
        self.vim_client.add_update_listener(listener)

    def remove_update_listener(self, listener):
        self.vim_client.remove_update_listener(listener)

    def transfer_image(self, source_image_id, source_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
        return self.image_transferer.send_image_to_host(
            source_image_id, source_datastore,
            destination_image_id, destination_datastore,
            host, port)

    def receive_image(self, image_id, datastore, imported_vm_name, metadata,
                      manifest):
        self.image_manager.receive_image(image_id, datastore,
                                         imported_vm_name, metadata,
                                         manifest)

    def set_memory_overcommit(self, memory_overcommit):
        # Enable/Disable large page support. If this host is removed
        # from the deployment, large page support will need to be
        # explicitly updated by the user.
        disable_large_pages = memory_overcommit > 1.0
        self.vim_client.set_large_page_support(disable=disable_large_pages)
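# A minimal usage sketch of set_memory_overcommit (hypothetical values):
# overcommit above 1.0 means total VM memory may exceed physical memory,
# so large page support is turned off, presumably because large pages get
# in the way of page sharing under overcommit; at or below 1.0 it stays on.
hypervisor.set_memory_overcommit(1.5)
# -> vim_client.set_large_page_support(disable=True)
hypervisor.set_memory_overcommit(1.0)
# -> vim_client.set_large_page_support(disable=False)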
def test_get_datastores(self, mkdir_mock):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        # name, url, type, local
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", False],
        ["datastore3", "/vmfs/volumes/id-3", "NFS", None],
        ["datastore4", "/vmfs/volumes/id-4", "NFSV41", None],
        ["datastore5", "/vmfs/volumes/id-5", "vsan", None],
        ["datastore6", "/vmfs/volumes/id-6", "VFFS", None],
    ])
    hypervisor.vim_client = vim_client

    ds_list = [
        "datastore1", "datastore2", "datastore3", "datastore4",
        "datastore5", "datastore6"
    ]
    image_ds = [{"name": "datastore2", "used_for_vms": False}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)

    expected_call_args = []
    for ds in ds_list:
        for folder in [
                DISK_FOLDER_NAME, VM_FOLDER_NAME, IMAGE_FOLDER_NAME,
                TMP_IMAGE_FOLDER_NAME
        ]:
            expected_call_args.append('/vmfs/volumes/%s/%s' % (ds, folder))
    called_args = [c[0][0] for c in mkdir_mock.call_args_list]
    assert_that(called_args, contains_inanyorder(*expected_call_args))

    assert_that(
        ds_manager.get_datastore_ids(),
        contains_inanyorder("id-1", "id-2", "id-3", "id-4", "id-5",
                            "id-6"))
    assert_that(
        ds_manager.vm_datastores(),
        contains_inanyorder("id-1", "id-3", "id-4", "id-5", "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(
        datastores,
        contains_inanyorder(
            Datastore("id-1", "datastore1", type=DSType.LOCAL_VMFS,
                      tags=set([LOCAL_VMFS_TAG])),
            Datastore("id-2", "datastore2", type=DSType.SHARED_VMFS,
                      tags=set([SHARED_VMFS_TAG])),
            Datastore("id-3", "datastore3", type=DSType.NFS_3,
                      tags=set([NFS_TAG])),
            Datastore("id-4", "datastore4", type=DSType.NFS_41,
                      tags=set([NFS_TAG])),
            Datastore("id-5", "datastore5", type=DSType.VSAN, tags=set()),
            Datastore("id-6", "datastore6", type=DSType.OTHER,
                      tags=set())))

    assert_that(ds_manager.image_datastores(), is_(["id-2"]))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
def test_get_datastores(self):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        # name, url, type, local
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", False],
        ["datastore3", "/vmfs/volumes/id-3", "NFS", None],
        ["datastore4", "/vmfs/volumes/id-4", "NFSV41", None],
        ["datastore5", "/vmfs/volumes/id-5", "vsan", None],
        ["datastore6", "/vmfs/volumes/id-6", "VFFS", None],
    ])
    hypervisor.vim_client = vim_client

    ds_list = [
        "datastore1", "datastore2", "datastore3", "datastore4",
        "datastore5", "datastore6"
    ]
    image_ds = [{"name": "datastore2", "used_for_vms": False}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)

    assert_that(
        ds_manager.get_datastore_ids(),
        contains_inanyorder("id-1", "id-2", "id-3", "id-4", "id-5",
                            "id-6"))
    assert_that(
        ds_manager.vm_datastores(),
        contains_inanyorder("id-1", "id-3", "id-4", "id-5", "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(
        datastores,
        contains_inanyorder(
            Datastore("id-1", "datastore1", type=DSType.LOCAL_VMFS,
                      tags=set([LOCAL_VMFS_TAG])),
            Datastore("id-2", "datastore2", type=DSType.SHARED_VMFS,
                      tags=set([SHARED_VMFS_TAG])),
            Datastore("id-3", "datastore3", type=DSType.NFS_3,
                      tags=set([NFS_TAG])),
            Datastore("id-4", "datastore4", type=DSType.NFS_41,
                      tags=set([NFS_TAG])),
            Datastore("id-5", "datastore5", type=DSType.VSAN,
                      tags=set([VSAN_TAG])),
            Datastore("id-6", "datastore6", type=DSType.OTHER,
                      tags=set())))

    assert_that(ds_manager.image_datastores(), is_(["id-2"]))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
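# A hedged sketch of the type mapping the assertions above imply (the real
# EsxDatastoreManager logic is not shown here, and _derive_ds_type is a
# hypothetical name): VMFS splits on the local flag, the two NFS flavors
# map to NFS_3/NFS_41, "vsan" maps to VSAN, and anything else (e.g. VFFS)
# falls through to OTHER.
def _derive_ds_type(type_string, local):
    if type_string == "VMFS":
        return DSType.LOCAL_VMFS if local else DSType.SHARED_VMFS
    if type_string == "NFS":
        return DSType.NFS_3
    if type_string == "NFSV41":
        return DSType.NFS_41
    if type_string == "vsan":
        return DSType.VSAN
    return DSType.OTHER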
def test_get_datastores(self, mkdir_mock):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        # name, url, type, local
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", False],
        ["datastore3", "/vmfs/volumes/id-3", "NFS", None],
        ["datastore4", "/vmfs/volumes/id-4", "NFSV41", None],
        ["datastore5", "/vmfs/volumes/id-5", "vsan", None],
        ["datastore6", "/vmfs/volumes/id-6", "VFFS", None],
    ])
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "datastore2", "datastore3",
               "datastore4", "datastore5", "datastore6"]
    image_ds = [{"name": "datastore2", "used_for_vms": False}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)

    expected_call_args = []
    for ds in ds_list:
        for folder in [DISK_FOLDER_NAME, VM_FOLDER_NAME,
                       IMAGE_FOLDER_NAME, TMP_IMAGE_FOLDER_NAME]:
            expected_call_args.append('/vmfs/volumes/%s/%s' % (ds, folder))
    called_args = [c[0][0] for c in mkdir_mock.call_args_list]
    assert_that(called_args, contains_inanyorder(*expected_call_args))

    assert_that(ds_manager.get_datastore_ids(),
                contains_inanyorder("id-1", "id-2", "id-3",
                                    "id-4", "id-5", "id-6"))
    assert_that(ds_manager.vm_datastores(),
                contains_inanyorder("id-1", "id-3", "id-4", "id-5",
                                    "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(datastores, contains_inanyorder(
        Datastore("id-1", "datastore1", type=DSType.LOCAL_VMFS,
                  tags=set([LOCAL_VMFS_TAG])),
        Datastore("id-2", "datastore2", type=DSType.SHARED_VMFS,
                  tags=set([SHARED_VMFS_TAG])),
        Datastore("id-3", "datastore3", type=DSType.NFS_3,
                  tags=set([NFS_TAG])),
        Datastore("id-4", "datastore4", type=DSType.NFS_41,
                  tags=set([NFS_TAG])),
        Datastore("id-5", "datastore5", type=DSType.VSAN, tags=set()),
        Datastore("id-6", "datastore6", type=DSType.OTHER, tags=set())))

    assert_that(ds_manager.image_datastores(), is_(["id-2"]))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
def test_get_datastores(self, mkdir_mock):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    dstags = MagicMock()
    dstags.get.return_value = []
    common.services.register(ServiceName.DATASTORE_TAGS, dstags)

    vim_client.get_datastore.side_effect = self._get_datastore
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "datastore2", "datastore3",
               "datastore4", "datastore5", "datastore6"]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list,
                                     set(["datastore2"]))

    expected_call_args = []
    for ds in ds_list:
        for folder in [DISK_FOLDER_NAME, VM_FOLDER_NAME,
                       IMAGE_FOLDER_NAME, TMP_IMAGE_FOLDER_NAME]:
            expected_call_args.append('/vmfs/volumes/%s/%s' % (ds, folder))
    called_args = [c[0][0] for c in mkdir_mock.call_args_list]
    assert_that(called_args, equal_to(expected_call_args))

    assert_that(ds_manager.get_datastore_ids(), has_length(6))
    assert_that(ds_manager.get_datastore_ids(),
                contains_inanyorder("id-1", "id-2", "id-3",
                                    "id-4", "id-5", "id-6"))
    assert_that(ds_manager.vm_datastores(), has_length(5))
    assert_that(ds_manager.vm_datastores(),
                contains_inanyorder("id-1", "id-3", "id-4", "id-5",
                                    "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(datastores[0], is_(Datastore("id-1", "datastore1",
                                             type=DSType.LOCAL_VMFS,
                                             tags=[LOCAL_VMFS_TAG])))
    assert_that(datastores[1], is_(Datastore("id-2", "datastore2",
                                             type=DSType.SHARED_VMFS,
                                             tags=[SHARED_VMFS_TAG])))
    assert_that(datastores[2], is_(Datastore("id-3", "datastore3",
                                             type=DSType.NFS_3,
                                             tags=[NFS_TAG])))
    assert_that(datastores[3], is_(Datastore("id-4", "datastore4",
                                             type=DSType.NFS_41,
                                             tags=[NFS_TAG])))
    assert_that(datastores[4], is_(Datastore("id-5", "datastore5",
                                             type=DSType.VSAN, tags=[])))
    assert_that(datastores[5], is_(Datastore("id-6", "datastore6",
                                             type=DSType.OTHER, tags=[])))

    assert_that(ds_manager.image_datastores(), is_(set(["id-2"])))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
def test_get_datastores(self, mkdir_mock):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    dstags = MagicMock()
    dstags.get.return_value = []
    common.services.register(ServiceName.DATASTORE_TAGS, dstags)

    vim_client.get_datastore.side_effect = self._get_datastore
    hypervisor.vim_client = vim_client

    ds_list = [
        "datastore1", "datastore2", "datastore3", "datastore4",
        "datastore5", "datastore6"
    ]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list,
                                     set(["datastore2"]))

    expected_call_args = []
    for ds in ds_list:
        for folder in [
                DISK_FOLDER_NAME, VM_FOLDER_NAME, IMAGE_FOLDER_NAME,
                TMP_IMAGE_FOLDER_NAME
        ]:
            expected_call_args.append('/vmfs/volumes/%s/%s' % (ds, folder))
    called_args = [c[0][0] for c in mkdir_mock.call_args_list]
    assert_that(called_args, equal_to(expected_call_args))

    assert_that(ds_manager.get_datastore_ids(), has_length(6))
    assert_that(
        ds_manager.get_datastore_ids(),
        contains_inanyorder("id-1", "id-2", "id-3", "id-4", "id-5",
                            "id-6"))
    assert_that(ds_manager.vm_datastores(), has_length(5))
    assert_that(
        ds_manager.vm_datastores(),
        contains_inanyorder("id-1", "id-3", "id-4", "id-5", "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(
        datastores[0],
        is_(
            Datastore("id-1", "datastore1", type=DSType.LOCAL_VMFS,
                      tags=[LOCAL_VMFS_TAG])))
    assert_that(
        datastores[1],
        is_(
            Datastore("id-2", "datastore2", type=DSType.SHARED_VMFS,
                      tags=[SHARED_VMFS_TAG])))
    assert_that(
        datastores[2],
        is_(
            Datastore("id-3", "datastore3", type=DSType.NFS_3,
                      tags=[NFS_TAG])))
    assert_that(
        datastores[3],
        is_(
            Datastore("id-4", "datastore4", type=DSType.NFS_41,
                      tags=[NFS_TAG])))
    assert_that(
        datastores[4],
        is_(Datastore("id-5", "datastore5", type=DSType.VSAN, tags=[])))
    assert_that(
        datastores[5],
        is_(Datastore("id-6", "datastore6", type=DSType.OTHER, tags=[])))

    assert_that(ds_manager.image_datastores(), is_(set(["id-2"])))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
class EsxHypervisor(object):
    """Manage ESX Hypervisor."""

    def __init__(self, agent_config):
        self.logger = logging.getLogger(__name__)

        # If VimClient's housekeeping thread fails to update its own cache,
        # call errback to commit suicide. Watchdog will bring up the agent
        # again.
        errback = lambda: suicide()
        self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                    errback=errback)
        atexit.register(lambda client: client.disconnect(), self.vim_client)

        self._uuid = self.vim_client.host_uuid

        # Enable/Disable large page support. If this host is removed
        # from the deployment, large page support will need to be
        # explicitly updated by the user.
        disable_large_pages = agent_config.memory_overcommit > 1.0
        self.vim_client.set_large_page_support(disable=disable_large_pages)

        image_datastores = [ds["name"]
                            for ds in agent_config.image_datastores]
        self.datastore_manager = EsxDatastoreManager(
            self, agent_config.datastores, image_datastores)
        # datastore manager needs to update the cache when there is a change.
        self.vim_client.add_update_listener(self.datastore_manager)
        self.vm_manager = EsxVmManager(self.vim_client,
                                       self.datastore_manager)
        self.disk_manager = EsxDiskManager(self.vim_client,
                                           self.datastore_manager)
        self.image_manager = EsxImageManager(self.vim_client,
                                             self.datastore_manager)
        self.network_manager = EsxNetworkManager(self.vim_client,
                                                 agent_config.networks)
        self.system = EsxSystem(self.vim_client)
        self.image_manager.monitor_for_cleanup()
        self.image_transferer = HttpNfcTransferer(self.vim_client,
                                                  image_datastores)
        atexit.register(self.image_manager.cleanup)

    @property
    def uuid(self):
        return self._uuid

    @property
    def config(self):
        config = gen.hypervisor.esx.ttypes.EsxConfig()
        return TSerialization.serialize(config)

    def normalized_load(self):
        """ Return the maximum of the normalized memory/cpu loads"""
        memory = self.system.memory_info()
        memory_load = memory.used * 100 / memory.total

        # Get the average cpu load percentage over the past 20 seconds.
        # Since hostd takes a sample every 20 seconds, we use the minimum
        # window of 20 seconds here to get the latest CPU active average
        # over 1 minute.
        host_stats = copy.copy(self.vim_client.get_perf_manager_stats(20))
        cpu_load = host_stats['rescpu.actav1'] / 100

        return max(memory_load, cpu_load)

    def check_image(self, image_id, datastore_id):
        return self.image_manager.check_image(
            image_id, self.datastore_manager.datastore_name(datastore_id))

    def acquire_vim_ticket(self):
        return self.vim_client.acquire_clone_ticket()

    def acquire_cgi_ticket(self, url, op):
        return self.vim_client.acquire_cgi_ticket(url, op)

    def add_update_listener(self, listener):
        self.vim_client.add_update_listener(listener)

    def remove_update_listener(self, listener):
        self.vim_client.remove_update_listener(listener)

    def transfer_image(self, source_image_id, source_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
        return self.image_transferer.send_image_to_host(
            source_image_id, source_datastore,
            destination_image_id, destination_datastore,
            host, port)

    def receive_image(self, image_id, datastore, imported_vm_name):
        self.image_manager.receive_image(image_id, datastore,
                                         imported_vm_name)
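# A quick worked example of normalized_load with hypothetical numbers:
# memory_info() reporting used=12288 MB of total=16384 MB gives
# memory_load = 12288 * 100 / 16384 = 75, and a rescpu.actav1 sample of
# 4500 (which the code divides by 100) gives cpu_load = 45, so
# normalized_load() returns max(75, 45) = 75.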
def test_get_datastores(self):
    """ Test esx datastore manager with different datastore types.

    Verify the datastore types are correctly parsed and all the
    datastores are populated.
    """
    hypervisor = MagicMock()
    vim_client = MagicMock()
    vim_client.get_all_datastores.return_value = self.get_datastore_mock([
        # name, url, type, local
        ["datastore1", "/vmfs/volumes/id-1", "VMFS", True],
        ["datastore2", "/vmfs/volumes/id-2", "VMFS", False],
        ["datastore3", "/vmfs/volumes/id-3", "NFS", None],
        ["datastore4", "/vmfs/volumes/id-4", "NFSV41", None],
        ["datastore5", "/vmfs/volumes/id-5", "vsan", None],
        ["datastore6", "/vmfs/volumes/id-6", "VFFS", None],
    ])
    hypervisor.vim_client = vim_client

    ds_list = ["datastore1", "datastore2", "datastore3",
               "datastore4", "datastore5", "datastore6"]
    image_ds = [{"name": "datastore2", "used_for_vms": False}]
    ds_manager = EsxDatastoreManager(hypervisor, ds_list, image_ds)

    assert_that(ds_manager.get_datastore_ids(),
                contains_inanyorder("id-1", "id-2", "id-3",
                                    "id-4", "id-5", "id-6"))
    assert_that(ds_manager.vm_datastores(),
                contains_inanyorder("id-1", "id-3", "id-4", "id-5",
                                    "id-6"))

    datastores = ds_manager.get_datastores()
    assert_that(datastores, contains_inanyorder(
        Datastore("id-1", "datastore1", type=DSType.LOCAL_VMFS,
                  tags=set([LOCAL_VMFS_TAG])),
        Datastore("id-2", "datastore2", type=DSType.SHARED_VMFS,
                  tags=set([SHARED_VMFS_TAG])),
        Datastore("id-3", "datastore3", type=DSType.NFS_3,
                  tags=set([NFS_TAG])),
        Datastore("id-4", "datastore4", type=DSType.NFS_41,
                  tags=set([NFS_TAG])),
        Datastore("id-5", "datastore5", type=DSType.VSAN,
                  tags=set([VSAN_TAG])),
        Datastore("id-6", "datastore6", type=DSType.OTHER, tags=set())))

    assert_that(ds_manager.image_datastores(), is_(["id-2"]))
    assert_that(ds_manager.datastore_type("id-1"), is_(DSType.LOCAL_VMFS))
    assert_that(ds_manager.datastore_type("id-2"), is_(DSType.SHARED_VMFS))
    assert_that(ds_manager.datastore_type("id-3"), is_(DSType.NFS_3))
    assert_that(ds_manager.datastore_type("id-4"), is_(DSType.NFS_41))
    assert_that(ds_manager.datastore_type("id-5"), is_(DSType.VSAN))
    assert_that(ds_manager.datastore_type("id-6"), is_(DSType.OTHER))

    # test normalize
    assert_that(ds_manager.normalize("id-1"), is_("id-1"))
    assert_that(ds_manager.normalize("datastore1"), is_("id-1"))
class EsxHypervisor(object):
    """Manage ESX Hypervisor."""

    def __init__(self, agent_config):
        self.logger = logging.getLogger(__name__)

        # If VimClient's housekeeping thread fails to update its own cache,
        # call errback to commit suicide. Watchdog will bring up the agent
        # again.
        errback = lambda: suicide()
        self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                    errback=errback)
        atexit.register(lambda client: client.disconnect(), self.vim_client)

        self._uuid = self.vim_client.host_uuid

        self.set_memory_overcommit(agent_config.memory_overcommit)

        image_datastores = [ds["name"]
                            for ds in agent_config.image_datastores]
        self.datastore_manager = EsxDatastoreManager(
            self, agent_config.datastores, agent_config.image_datastores)
        # datastore manager needs to update the cache when there is a change.
        self.vim_client.add_update_listener(self.datastore_manager)
        self.vm_manager = EsxVmManager(self.vim_client,
                                       self.datastore_manager)
        self.disk_manager = EsxDiskManager(self.vim_client,
                                           self.datastore_manager)
        self.image_manager = EsxImageManager(self.vim_client,
                                             self.datastore_manager)
        self.network_manager = EsxNetworkManager(self.vim_client,
                                                 agent_config.networks)
        self.system = EsxSystem(self.vim_client)
        self.image_manager.monitor_for_cleanup()
        self.image_transferer = HttpNfcTransferer(self.vim_client,
                                                  image_datastores)
        atexit.register(self.image_manager.cleanup)

    @property
    def uuid(self):
        return self._uuid

    @property
    def config(self):
        config = gen.hypervisor.esx.ttypes.EsxConfig()
        return TSerialization.serialize(config)

    def normalized_load(self):
        """ Return the maximum of the normalized memory/cpu loads"""
        memory = self.system.memory_info()
        memory_load = memory.used * 100 / memory.total

        # Get the average cpu load percentage over the past 20 seconds.
        # Since hostd takes a sample every 20 seconds, we use the minimum
        # window of 20 seconds here to get the latest CPU active average
        # over 1 minute.
        host_stats = copy.copy(self.vim_client.get_perf_manager_stats(20))
        cpu_load = host_stats['rescpu.actav1'] / 100

        return max(memory_load, cpu_load)

    def check_image(self, image_id, datastore_id):
        return self.image_manager.check_image(
            image_id, self.datastore_manager.datastore_name(datastore_id))

    def acquire_vim_ticket(self):
        return self.vim_client.acquire_clone_ticket()

    def acquire_cgi_ticket(self, url, op):
        return self.vim_client.acquire_cgi_ticket(url, op)

    def add_update_listener(self, listener):
        self.vim_client.add_update_listener(listener)

    def remove_update_listener(self, listener):
        self.vim_client.remove_update_listener(listener)

    def transfer_image(self, source_image_id, source_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
        return self.image_transferer.send_image_to_host(
            source_image_id, source_datastore,
            destination_image_id, destination_datastore,
            host, port)

    def receive_image(self, image_id, datastore, imported_vm_name):
        self.image_manager.receive_image(image_id, datastore,
                                         imported_vm_name)

    def set_memory_overcommit(self, memory_overcommit):
        # Enable/Disable large page support. If this host is removed
        # from the deployment, large page support will need to be
        # explicitly updated by the user.
        disable_large_pages = memory_overcommit > 1.0
        self.vim_client.set_large_page_support(disable=disable_large_pages)
class EsxHypervisor(object):
    """Manage ESX Hypervisor."""

    def __init__(self, agent_config):
        self.logger = logging.getLogger(__name__)

        # If VimClient's housekeeping thread fails to update its own cache,
        # call errback to commit suicide. Watchdog will bring up the agent
        # again.
        self.vim_client = VimClient(wait_timeout=agent_config.wait_timeout,
                                    errback=lambda: suicide())
        atexit.register(lambda client: client.disconnect(), self.vim_client)

        self._uuid = self.vim_client.host_uuid

        self.datastore_manager = EsxDatastoreManager(
            self, agent_config.datastores, agent_config.image_datastores)
        # datastore manager needs to update the cache when there is a change.
        self.vim_client.add_update_listener(self.datastore_manager)
        self.vm_manager = EsxVmManager(self.vim_client,
                                       self.datastore_manager)
        self.disk_manager = EsxDiskManager(self.vim_client,
                                           self.datastore_manager)
        self.image_manager = EsxImageManager(self.vim_client,
                                             self.datastore_manager)
        self.network_manager = EsxNetworkManager(self.vim_client,
                                                 agent_config.networks)
        self.system = EsxSystem(self.vim_client)
        self.image_manager.monitor_for_cleanup()
        self.image_transferer = HttpNfcTransferer(
            self.vim_client, self.datastore_manager.image_datastores())
        atexit.register(self.image_manager.cleanup)

    @property
    def uuid(self):
        return self._uuid

    def check_image(self, image_id, datastore_id):
        return self.image_manager.check_image(
            image_id, self.datastore_manager.datastore_name(datastore_id))

    def acquire_vim_ticket(self):
        return self.vim_client.acquire_clone_ticket()

    def add_update_listener(self, listener):
        self.vim_client.add_update_listener(listener)

    def remove_update_listener(self, listener):
        self.vim_client.remove_update_listener(listener)

    def transfer_image(self, source_image_id, source_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
        return self.image_transferer.send_image_to_host(
            source_image_id, source_datastore,
            destination_image_id, destination_datastore,
            host, port)

    def prepare_receive_image(self, image_id, datastore):
        return self.image_manager.prepare_receive_image(image_id, datastore)

    def receive_image(self, image_id, datastore, imported_vm_name, metadata):
        self.image_manager.receive_image(image_id, datastore,
                                         imported_vm_name, metadata)

    def set_memory_overcommit(self, memory_overcommit):
        # Enable/Disable large page support. If this host is removed
        # from the deployment, large page support will need to be
        # explicitly updated by the user.
        disable_large_pages = memory_overcommit > 1.0
        self.vim_client.set_large_page_support(disable=disable_large_pages)
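# A minimal usage sketch of transfer_image (all ids, datastore names, host,
# and port below are hypothetical): pushing an image from this host's
# datastore to a peer agent over the HTTP NFC transferer.
hypervisor.transfer_image(
    source_image_id="image-1", source_datastore="datastore1",
    destination_image_id="image-1", destination_datastore="datastore2",
    host="10.0.0.2", port=8835)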