Example #1
    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)
Example #2
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)
Example #3
    def __init__(self, vim_client, ds_manager):
        self.vim_client = vim_client
        self.vm_config = EsxVmConfig(vim_client)
        self._logger = logging.getLogger(__name__)
        self._ds_manager = ds_manager
        self._lock = threading.Lock()
        self._datastore_cache = {}

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)
    def setUp(self, connect, update, creds):
        creds.return_value = ["username", "password"]
        self.vim_client = VimClient(auto_sync=False)
        with patch("host.hypervisor.esx.vm_config.GetEnv"):
            self.vm_config = EsxVmConfig(self.vim_client)
class TestEsxVmConfig(unittest.TestCase):
    @patch.object(VimClient, "acquire_credentials")
    @patch.object(VimClient, "update_cache")
    @patch("pysdk.connect.Connect")
    def setUp(self, connect, update, creds):
        creds.return_value = ["username", "password"]
        self.vim_client = VimClient(auto_sync=False)
        with patch("host.hypervisor.esx.vm_config.GetEnv"):
            self.vm_config = EsxVmConfig(self.vim_client)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def dummy_devices(self):
        return [
            vim.vm.device.VirtualFloppy(key=10),
            vim.vm.device.VirtualPCIController(key=100),
            DEFAULT_DISK_CONTROLLER_CLASS(key=1000),
            vim.vm.device.VirtualSoundCard(key=10000),
        ]

    def test_vm_create_spec(self):
        datastore = "ds1"
        vm_id = str(uuid.uuid4())
        metadata = {
            "configuration": {"guestOS": "otherLinuxGuest"},
            "parameters": [{"name": "key1"}, {"name": "key2"}]
        }
        env = {
            "key1": "value1",
            "keyUnexpected": "valueNotSet",
        }
        spec = self.vm_config.create_spec(vm_id, datastore, 512, 1, metadata,
                                          env)
        assert_that(spec.memoryMB, equal_to(512))
        assert_that(spec.numCPUs, equal_to(1))
        assert_that(spec.name, equal_to(vm_id))
        assert_that(spec.guestId, equal_to("otherLinuxGuest"))
        expected_metadata = {'guestOS': 'otherLinuxGuest', 'key1': 'value1'}
        assert_that(spec._metadata, equal_to(expected_metadata))

    def test_create_nic_spec(self):
        net_name = "VM_network"
        cspec = self.vm_config.update_spec()
        spec = self.vm_config.add_nic(cspec, net_name)
        backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo
        assert_that(spec.deviceChange[0].device.backing.__class__,
                    equal_to(backing))
        assert_that(spec.deviceChange[0].device.backing.deviceName,
                    equal_to(net_name))

    def test_find_disk_controller(self):
        devices = self.dummy_devices()
        device_type = DEFAULT_DISK_CONTROLLER_CLASS
        disk_controller = self.vm_config.find_device(devices, device_type)
        assert_that(disk_controller.key, equal_to(1000))

    def test_find_nic_controller(self):
        devices = self.dummy_devices()
        device_type = vim.vm.device.VirtualPCIController
        disk_controller = self.vm_config.find_device(devices, device_type)
        assert_that(disk_controller.key, equal_to(100))

    def test_find_virtual_disk(self):
        spec = vim.vm.ConfigSpec()
        vm_config = self.vm_config
        devices = self.dummy_devices()
        for device in devices:
            vm_config.add_device(spec, device)
        cfg_info = FakeConfigInfo()
        device_type = vim.vm.device.VirtualDisk
        datastore = "ds1"
        filename = "folder/foo"
        path = vmdk_path(datastore, filename)

        find_disk = vm_config.disk_matcher(datastore, filename)
        disk = vm_config.find_device(devices, device_type, matcher=find_disk)
        assert_that(disk, equal_to(None))

        vm_config.add_scsi_disk(cfg_info, spec, datastore, "nope")

        self.assertRaises(DeviceNotFoundException, vm_config.get_device,
                          devices, device_type, matcher=find_disk)

        vm_config.add_scsi_disk(cfg_info, spec, datastore,
                                filename)
        device_changes = spec.deviceChange
        device_list = []
        for device_change in device_changes:
            device_list.append(device_change.device)

        disk = vm_config.find_device(device_list, device_type,
                                     matcher=find_disk)
        assert_that(disk.backing.fileName, equal_to(path))

    def _create_spec_for_disk_test(self, datastore, vm_id):
        spec = vim.vm.ConfigSpec()
        devices = self.dummy_devices()
        for device in devices:
            self.vm_config.add_device(spec, device)
        vm_path_name = '[%s] %s/%s' % (datastore, vm_id[0:2], vm_id)
        spec.files = vim.vm.FileInfo(vmPathName=vm_path_name)
        spec.name = vm_id
        return spec

    def test_create_empty_disk(self):
        vm_id = str(uuid.uuid4())
        datastore = "ds1"
        spec = self._create_spec_for_disk_test(datastore, vm_id)

        size_mb = 100
        disk_id = str(uuid.uuid4())
        self.vm_config.create_empty_disk(spec, datastore, disk_id, size_mb)

        devs = [change.device for change in spec.deviceChange]
        device_type = vim.vm.device.VirtualDisk
        disks = self.vm_config.find_devices(devs, device_type)
        assert_that(len(disks), equal_to(1))
        # verify that the uuid to be set on the disk being added matches the
        # disk's id (modulo some formatting differences)
        assert_that(disks[0].backing.uuid,
                    equal_to(uuid_to_vmdk_uuid(disk_id)))

    def test_create_child_disk(self):
        vm_id = str(uuid.uuid4())
        datastore = "ds1"
        spec = self._create_spec_for_disk_test(datastore, vm_id)

        disk_id = str(uuid.uuid4())
        parent_id = str(uuid.uuid4())
        self.vm_config.create_child_disk(spec, datastore, disk_id, parent_id)

        devs = [change.device for change in spec.deviceChange]
        device_type = vim.vm.device.VirtualDisk
        disks = self.vm_config.find_devices(devs, device_type)
        assert_that(len(disks), equal_to(1))
        # verify that the disk to be added does not request a specific uuid
        assert_that(disks[0].backing.uuid, equal_to(None))

    def _get_config_info_with_iso(self, iso_path):
        devices = self.dummy_devices()
        cfg_info = FakeConfigInfo()
        cfg_info.hardware.device = devices

        cdrom = vim.vm.device.VirtualCdrom()
        cdrom.key = 1234
        cdrom.controllerKey = 100
        cdrom.unitNumber = 1

        iso_backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
        iso_backing.fileName = iso_path
        cdrom.backing = iso_backing

        conInfo = vim.vm.device.VirtualDevice.ConnectInfo()
        conInfo.allowGuestControl = True
        conInfo.connected = True
        conInfo.startConnected = True
        cdrom.connectable = conInfo
        cfg_info.hardware.device.append(cdrom)
        return cfg_info

    def _get_config_info_without_connected(self, is_iso_backing):
        devices = self.dummy_devices()
        cfg_info = FakeConfigInfo()
        cfg_info.hardware.device = devices

        cdrom = vim.vm.device.VirtualCdrom()
        cdrom.key = 1234
        cdrom.controllerKey = 100
        cdrom.unitNumber = 1

        if is_iso_backing:
            iso_backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
            cdrom.backing = iso_backing

        conInfo = vim.vm.device.VirtualDevice.ConnectInfo()
        conInfo.allowGuestControl = True
        conInfo.connected = False
        conInfo.startConnected = True
        cdrom.connectable = conInfo
        cfg_info.hardware.device.append(cdrom)
        return cfg_info

    def test_add_iso_cdrom(self):
        virtual_ide_controller = vim.vm.device.VirtualIDEController()
        cfgOption = vim.vm.ConfigOption()
        cfgOption.defaultDevice.append(virtual_ide_controller)
        self.vm_config._cfg_opts = cfgOption
        # fake iso ds path
        fake_iso_ds_path = '[ds] vm_fake/fake.iso'

        # test when no virtual cdrom is attached to the VM
        cfg_info = FakeConfigInfo()

        cspec = self.vm_config.update_spec()

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(True))

        dev = cspec.deviceChange[0].device
        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(dev.connectable.connected, equal_to(True))
        assert_that(dev.connectable.startConnected, equal_to(True))
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))

        # test when a virtual cdrom exists and the ISO is already attached
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(False))

        # test when a virtual cdrom with an ISO backing exists
        # and the ISO is not attached to the VM
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_without_connected(is_iso_backing=True)

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(True))

        dev = cspec.deviceChange[0].device
        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(dev.connectable.connected, equal_to(True))
        assert_that(dev.connectable.startConnected, equal_to(True))
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))

        # test when a virtual cdrom that does _not_ have an ISO backing
        # exists and no ISO is attached to the VM
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_without_connected(
            is_iso_backing=False)

        self.assertRaises(TypeError,
                          self.vm_config.add_iso_cdrom,
                          cspec, fake_iso_ds_path, cfg_info)

    def test_disconnect_iso(self):
        # on vm config with no cdrom devices
        cfg_info = FakeConfigInfo()
        cspec = self.vm_config.update_spec()
        self.assertRaises(DeviceNotFoundException,
                          self.vm_config.disconnect_iso_cdrom,
                          cspec, cfg_info)
        assert_that(len(cspec.deviceChange), equal_to(0))

        # on vm config with a fake cdrom device
        fake_iso_ds_path = '[ds] vm_fake/fake.iso'
        cspec = self.vm_config.update_spec()
        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)
        iso_path = self.vm_config.disconnect_iso_cdrom(cspec, cfg_info)

        assert_that(len(cspec.deviceChange), equal_to(1))
        dev = cspec.deviceChange[0].device
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))
        assert_that(dev.backing.fileName,
                    equal_to(fake_iso_ds_path))

        assert_that(iso_path, equal_to(fake_iso_ds_path))
        assert_that(dev.connectable.connected, equal_to(False))
        assert_that(dev.connectable.startConnected, equal_to(False))

    def test_remove_iso_cdrom_device(self):
        fake_iso_ds_path = '[ds] vm_fake/fake.iso'
        cspec = self.vm_config.update_spec()
        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)
        self.vm_config.remove_iso_cdrom(cspec, cfg_info)

        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(cspec.deviceChange[0].operation, equal_to('remove'))
        dev = cspec.deviceChange[0].device
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))
        assert_that(dev.backing.fileName,
                    equal_to(fake_iso_ds_path))

    def test_update_spec(self):
        cfg_info = FakeConfigInfo()
        spec = self.vm_config.update_spec()
        assert_that(len(spec.deviceChange), equal_to(0))
        net_name = "VM_Network"
        self.vm_config.add_nic(spec, net_name)
        assert_that(len(spec.deviceChange), equal_to(1))
        self.vm_config.add_scsi_disk(cfg_info, spec, "ds1", "foo")
        # One for the controller and one for the disk itself.
        assert_that(len(spec.deviceChange), equal_to(3))

    def test_path_conversion_invalid(self):
        self.assertRaises(IndexError, datastore_to_os_path, "invalid_ds_path")

    @parameterized.expand([
        ('[foo] a/b/c.vmdk', '/vmfs/volumes/foo/a/b/c.vmdk'),
        ('[foo] c.vmdk', '/vmfs/volumes/foo/c.vmdk'),
        ('[foo]a', '/vmfs/volumes/foo/a'),
        ('/vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk'),
        ('[]/vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk'),
        ('[] /vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk')
    ])
    def test_path_conversion(self, ds_path, expected_os_path):
        path = datastore_to_os_path(ds_path)
        assert_that(path, equal_to(expected_os_path))

    @parameterized.expand([
        (['[foo] image_a_b/c.vmdk'], True, False, False),
        (['[foo] vm_a_b/c.vmdk'], False, True, False),
        (['[foo] image_a_b/c.vmdk', '[foo] vm/a.vmdk'], False, True, False),
        (['[foo] disk_a_b/c.vmdk'], False, False, True),
        (['[foo] image_a/c.vmdk', '[foo] disk/a.vmdk'], False, False, True),
        ([], False, False, False)
    ])
    def test_is_what_disk(self, disk_files, image, ephemeral, persistent):
        assert_that(is_image(disk_files), equal_to(image))
        assert_that(is_ephemeral_disk(disk_files), equal_to(ephemeral))
        assert_that(is_persistent_disk(disk_files), equal_to(persistent))

    def test_vmdk_uuid_conversion(self):
        for id in ['01234567-89ab-cedf-0123-456789abcdef',
                   '01 23456 789ABCEDF0123456789ABCDEF',
                   '01 23 45 67 89 ab ce df-01 23 45 67 89 ab cd ef',
                   '0123456789abcedf0123456789abcdef']:
            vmdk_uuid = uuid_to_vmdk_uuid(id)
            assert_that(
                vmdk_uuid,
                equal_to('01 23 45 67 89 ab ce df-01 23 45 67 89 ab cd ef'))
        for id in ['',
                   '01234567-89ab-cedf-0123-456789abcd',
                   '01 23456 789abcedf0123456789abcdefabcd']:
            self.assertRaises(ValueError, uuid_to_vmdk_uuid, id)
Example #8
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _create_remote_vim_client(self, agent_client, host):
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host,
                               ticket=response.vim_ticket,
                               auto_sync=False)
        return vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Fail to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases have '*' as a placeholder
        # for the host name; it has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self, shadow_vm_id):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _create_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        shadow_vm_id = SHADOW_VM_NAME_PREFIX + str(uuid.uuid4())
        spec = self._vm_config.create_spec(
            vm_id=shadow_vm_id,
            datastore=self._get_shadow_vm_datastore(),
            memory=32,
            cpus=1)
        try:
            self._vm_manager.create_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" %
                                   shadow_vm_id)
            raise
        return shadow_vm_id

    def _delete_shadow_vm(self, shadow_vm_id):
        try:
            # detach disk so it is not deleted along with vm
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.update_vm(shadow_vm_id, spec)

            # delete the vm
            self._vm_manager.delete_vm(shadow_vm_id, force=True)
        except Exception:
            self._logger.exception("Error deleting vm with id %s" %
                                   shadow_vm_id)

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore,
                                       shadow_vm_id):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.add_disk(spec,
                                      image_datastore,
                                      image_id,
                                      info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore,
                                         shadow_vm_id):
        """ Obtain a handle to the streamOptimized disk from shadow vm.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """
        self._configure_shadow_vm_with_disk(image_id, image_datastore,
                                            shadow_vm_id)
        lease, disk_url = self._export_shadow_vm(shadow_vm_id)
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _prepare_receive_image(self, agent_client, image_id, datastore):
        request = PrepareReceiveImageRequest(image_id, datastore)
        response = agent_client.prepare_receive_image(request)
        if response.result != PrepareReceiveImageResultCode.OK:
            err_msg = "Failed to prepare receive image. Response = %s" % str(
                response)
            self._logger.info(err_msg)
            raise ValueError(err_msg)
        return response.import_vm_path, response.import_vm_id

    def _create_import_vm_spec(self, vm_id, datastore, vm_path):
        spec = EsxVmConfigSpec(vm_id, "otherGuest", 32, 1, vm_path, None)
        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec,
                                                  datastore,
                                                  None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client, image_id,
                                         destination_datastore,
                                         imported_vm_name, metadata):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(image_id=image_id,
                                      datastore_id=destination_datastore,
                                      transferred_image_id=imported_vm_name,
                                      metadata=metadata)

        response = agent_client.receive_image(request)
        if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST:
            raise DiskAlreadyExistException(response.error)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    def _read_metadata(self, image_datastore, image_id):
        try:
            # Transfer raw metadata
            metadata_path = os_metadata_path(image_datastore, image_id,
                                             IMAGE_FOLDER_NAME_PREFIX)
            metadata = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = f.read()

            return metadata
        except:
            self._logger.exception("Failed to read metadata")
            raise

    def _send_image(self, agent_client, host, tmp_path, spec):
        vim_client = self._create_remote_vim_client(agent_client, host)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(
                vim_client, spec)
            try:
                self.upload_file(tmp_path, disk_url, write_lease)
            finally:
                write_lease.Complete()
        finally:
            vim_client.disconnect()

    @lock_non_blocking
    def send_image_to_host(self, image_id, image_datastore,
                           destination_image_id, destination_datastore, host,
                           port):
        if destination_image_id is None:
            destination_image_id = image_id
        metadata = self._read_metadata(image_datastore, image_id)

        shadow_vm_id = self._create_shadow_vm()

        # place transfer.vmdk under shadow_vm_path to work around VSAN's restriction on
        # files at datastore top-level
        shadow_vm_path = os_datastore_path(
            self._get_shadow_vm_datastore(),
            compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
        transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
        self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)

        agent_client = None
        try:
            read_lease, disk_url = self._get_image_stream_from_shadow_vm(
                image_id, image_datastore, shadow_vm_id)

            try:
                self.download_file(disk_url, transfer_vmdk_path, read_lease)
            finally:
                read_lease.Complete()

            agent_client = DirectClient("Host", Host.Client, host, port)
            agent_client.connect()

            vm_path, vm_id = self._prepare_receive_image(
                agent_client, destination_image_id, destination_datastore)
            spec = self._create_import_vm_spec(vm_id, destination_datastore,
                                               vm_path)

            self._send_image(agent_client, host, transfer_vmdk_path, spec)
            self._register_imported_image_at_host(agent_client,
                                                  destination_image_id,
                                                  destination_datastore, vm_id,
                                                  metadata)

            return vm_id
        finally:
            try:
                os.unlink(transfer_vmdk_path)
            except OSError:
                pass
            self._delete_shadow_vm(shadow_vm_id)
            rm_rf(shadow_vm_path)
            if agent_client:
                agent_client.close()
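
A minimal usage sketch for the transferer above (not part of the original
example): it assumes an already-connected VimClient named vim_client, and the
datastore names, remote agent host and port below are illustrative
placeholders rather than values from the source.

# Hypothetical call sequence based on the signatures shown above; every
# literal value here is a made-up placeholder.
transferer = HttpNfcTransferer(vim_client, ["datastore1"],
                               host_name="10.0.0.1")
imported_vm_id = transferer.send_image_to_host(
    image_id="image-1234",
    image_datastore="datastore1",
    destination_image_id=None,            # defaults to image_id remotely
    destination_datastore="datastore2",
    host="10.0.0.2",                      # remote agent host
    port=8835)                            # remote agent port (placeholder)
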
    def setUp(self, connect, update, creds):
        creds.return_value = ["username", "password"]
        self.vim_client = VimClient(auto_sync=False)
        with patch("host.hypervisor.esx.vm_config.GetEnv"):
            self.vm_config = EsxVmConfig(self.vim_client)
Example #10
class TestEsxVmConfig(unittest.TestCase):
    @patch.object(VimClient, "acquire_credentials")
    @patch.object(VimClient, "update_cache")
    @patch("pysdk.connect.Connect")
    def setUp(self, connect, update, creds):
        creds.return_value = ["username", "password"]
        self.vim_client = VimClient(auto_sync=False)
        with patch("host.hypervisor.esx.vm_config.GetEnv"):
            self.vm_config = EsxVmConfig(self.vim_client)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def dummy_devices(self):
        return [
            vim.vm.device.VirtualFloppy(key=10),
            vim.vm.device.VirtualPCIController(key=100),
            DEFAULT_DISK_CONTROLLER_CLASS(key=1000),
            vim.vm.device.VirtualSoundCard(key=10000),
        ]

    def test_vm_create_spec(self):
        datastore = "ds1"
        vm_id = str(uuid.uuid4())
        metadata = {
            "configuration": {"guestOS": "otherLinuxGuest"},
            "parameters": [{"name": "key1"}, {"name": "key2"}]
        }
        env = {
            "key1": "value1",
            "keyUnexpected": "valueNotSet",
        }
        spec = self.vm_config.create_spec(vm_id, datastore, 512, 1, metadata,
                                          env)
        assert_that(spec.memoryMB, equal_to(512))
        assert_that(spec.numCPUs, equal_to(1))
        assert_that(spec.name, equal_to(vm_id))
        assert_that(spec.guestId, equal_to("otherLinuxGuest"))
        expected_metadata = {'guestOS': 'otherLinuxGuest', 'key1': 'value1'}
        assert_that(spec._metadata, equal_to(expected_metadata))

    def test_create_nic_spec(self):
        net_name = "VM_network"
        cspec = self.vm_config.update_spec()
        spec = self.vm_config.add_nic(cspec, net_name)
        backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo
        assert_that(spec.deviceChange[0].device.backing.__class__,
                    equal_to(backing))
        assert_that(spec.deviceChange[0].device.backing.deviceName,
                    equal_to(net_name))

    def test_find_disk_controller(self):
        devices = self.dummy_devices()
        device_type = DEFAULT_DISK_CONTROLLER_CLASS
        disk_controller = self.vm_config.find_device(devices, device_type)
        assert_that(disk_controller.key, equal_to(1000))

    def test_find_nic_controller(self):
        devices = self.dummy_devices()
        device_type = vim.vm.device.VirtualPCIController
        disk_controller = self.vm_config.find_device(devices, device_type)
        assert_that(disk_controller.key, equal_to(100))

    def test_find_virtual_disk(self):
        spec = vim.vm.ConfigSpec()
        vm_config = self.vm_config
        devices = self.dummy_devices()
        for device in devices:
            vm_config.add_device(spec, device)
        cfg_info = FakeConfigInfo()
        device_type = vim.vm.device.VirtualDisk
        datastore = "ds1"
        filename = "folder/foo"
        path = vmdk_path(datastore, filename)

        find_disk = vm_config.disk_matcher(datastore, filename)
        disk = vm_config.find_device(devices, device_type, matcher=find_disk)
        assert_that(disk, equal_to(None))

        vm_config.add_scsi_disk(cfg_info, spec, datastore, "nope")

        self.assertRaises(DeviceNotFoundException, vm_config.get_device,
                          devices, device_type, matcher=find_disk)

        vm_config.add_scsi_disk(cfg_info, spec, datastore,
                                filename)
        device_changes = spec.deviceChange
        device_list = []
        for device_change in device_changes:
            device_list.append(device_change.device)

        disk = vm_config.find_device(device_list, device_type,
                                     matcher=find_disk)
        assert_that(disk.backing.fileName, equal_to(path))

    def _create_spec_for_disk_test(self, datastore, vm_id):
        spec = vim.vm.ConfigSpec()
        devices = self.dummy_devices()
        for device in devices:
            self.vm_config.add_device(spec, device)
        vm_path_name = '[%s] %s/%s' % (datastore, vm_id[0:2], vm_id)
        spec.files = vim.vm.FileInfo(vmPathName=vm_path_name)
        spec.name = vm_id
        return spec

    def test_create_empty_disk(self):
        vm_id = str(uuid.uuid4())
        datastore = "ds1"
        spec = self._create_spec_for_disk_test(datastore, vm_id)

        size_mb = 100
        disk_id = str(uuid.uuid4())
        self.vm_config.create_empty_disk(spec, datastore, disk_id, size_mb)

        devs = [change.device for change in spec.deviceChange]
        device_type = vim.vm.device.VirtualDisk
        disks = self.vm_config.find_devices(devs, device_type)
        assert_that(len(disks), equal_to(1))
        # verify that the uuid to be set on the disk being added matches the
        # disk's id (modulo some formatting differences)
        assert_that(disks[0].backing.uuid,
                    equal_to(uuid_to_vmdk_uuid(disk_id)))

    def test_create_child_disk(self):
        vm_id = str(uuid.uuid4())
        datastore = "ds1"
        spec = self._create_spec_for_disk_test(datastore, vm_id)

        disk_id = str(uuid.uuid4())
        parent_id = str(uuid.uuid4())
        self.vm_config.create_child_disk(spec, datastore, disk_id, parent_id)

        devs = [change.device for change in spec.deviceChange]
        device_type = vim.vm.device.VirtualDisk
        disks = self.vm_config.find_devices(devs, device_type)
        assert_that(len(disks), equal_to(1))
        # verify that the disk to be added does not request a specific uuid
        assert_that(disks[0].backing.uuid, equal_to(None))

    def _get_config_info_with_iso(self, iso_path):
        devices = self.dummy_devices()
        cfg_info = FakeConfigInfo()
        cfg_info.hardware.device = devices

        cdrom = vim.vm.device.VirtualCdrom()
        cdrom.key = 1234
        cdrom.controllerKey = 100
        cdrom.unitNumber = 1

        iso_backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
        iso_backing.fileName = iso_path
        cdrom.backing = iso_backing

        conInfo = vim.vm.device.VirtualDevice.ConnectInfo()
        conInfo.allowGuestControl = True
        conInfo.connected = True
        conInfo.startConnected = True
        cdrom.connectable = conInfo
        cfg_info.hardware.device.append(cdrom)
        return cfg_info

    def _get_config_info_without_connected(self, is_iso_backing):
        devices = self.dummy_devices()
        cfg_info = FakeConfigInfo()
        cfg_info.hardware.device = devices

        cdrom = vim.vm.device.VirtualCdrom()
        cdrom.key = 1234
        cdrom.controllerKey = 100
        cdrom.unitNumber = 1

        if is_iso_backing:
            iso_backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
            cdrom.backing = iso_backing

        conInfo = vim.vm.device.VirtualDevice.ConnectInfo()
        conInfo.allowGuestControl = True
        conInfo.connected = False
        conInfo.startConnected = True
        cdrom.connectable = conInfo
        cfg_info.hardware.device.append(cdrom)
        return cfg_info

    def test_add_iso_cdrom(self):
        virtual_ide_controller = vim.vm.device.VirtualIDEController()
        cfgOption = vim.vm.ConfigOption()
        cfgOption.defaultDevice.append(virtual_ide_controller)
        self.vm_config._cfg_opts = cfgOption
        # fake iso ds path
        fake_iso_ds_path = '[ds] vms/fa/fake/fake.iso'

        # test when no virtual cdrom is attached to the VM
        cfg_info = FakeConfigInfo()

        cspec = self.vm_config.update_spec()

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(True))

        dev = cspec.deviceChange[0].device
        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(dev.connectable.connected, equal_to(True))
        assert_that(dev.connectable.startConnected, equal_to(True))
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))

        # test when a virtual cdrom exists and the ISO is already attached
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(False))

        # test when a virtual cdrom with an ISO backing exists
        # and the ISO is not attached to the VM
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_without_connected(is_iso_backing=True)

        result = self.vm_config.add_iso_cdrom(
            cspec,
            fake_iso_ds_path,
            cfg_info)

        assert_that(result.__class__,
                    equal_to(bool))
        assert_that(result, equal_to(True))

        dev = cspec.deviceChange[0].device
        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(dev.connectable.connected, equal_to(True))
        assert_that(dev.connectable.startConnected, equal_to(True))
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))

        # test when a virtual cdrom that does _not_ have an ISO backing
        # exists and no ISO is attached to the VM
        cspec = self.vm_config.update_spec()

        cfg_info = self._get_config_info_without_connected(
            is_iso_backing=False)

        self.assertRaises(TypeError,
                          self.vm_config.add_iso_cdrom,
                          cspec, fake_iso_ds_path, cfg_info)

    def test_disconnect_iso(self):
        # on vm config with no cdrom devices
        cfg_info = FakeConfigInfo()
        cspec = self.vm_config.update_spec()
        self.assertRaises(DeviceNotFoundException,
                          self.vm_config.disconnect_iso_cdrom,
                          cspec, cfg_info)
        assert_that(len(cspec.deviceChange), equal_to(0))

        # on vm config with a fake cdrom device
        fake_iso_ds_path = '[ds] vms/fa/fake/fake.iso'
        cspec = self.vm_config.update_spec()
        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)
        iso_path = self.vm_config.disconnect_iso_cdrom(cspec, cfg_info)

        assert_that(len(cspec.deviceChange), equal_to(1))
        dev = cspec.deviceChange[0].device
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))
        assert_that(dev.backing.fileName,
                    equal_to(fake_iso_ds_path))

        assert_that(iso_path, equal_to(fake_iso_ds_path))
        assert_that(dev.connectable.connected, equal_to(False))
        assert_that(dev.connectable.startConnected, equal_to(False))

    def test_remove_iso_cdrom_device(self):
        fake_iso_ds_path = '[ds] vms/fa/fake/fake.iso'
        cspec = self.vm_config.update_spec()
        cfg_info = self._get_config_info_with_iso(fake_iso_ds_path)
        self.vm_config.remove_iso_cdrom(cspec, cfg_info)

        assert_that(len(cspec.deviceChange), equal_to(1))
        assert_that(cspec.deviceChange[0].operation, equal_to('remove'))
        dev = cspec.deviceChange[0].device
        assert_that(dev.backing.__class__,
                    equal_to(vim.vm.device.VirtualCdrom.IsoBackingInfo))
        assert_that(dev.backing.fileName,
                    equal_to(fake_iso_ds_path))

    def test_update_spec(self):
        cfg_info = FakeConfigInfo()
        spec = self.vm_config.update_spec()
        assert_that(len(spec.deviceChange), equal_to(0))
        net_name = "VM_Network"
        self.vm_config.add_nic(spec, net_name)
        assert_that(len(spec.deviceChange), equal_to(1))
        self.vm_config.add_scsi_disk(cfg_info, spec, "ds1", "foo")
        # One for the controller and one for the disk itself.
        assert_that(len(spec.deviceChange), equal_to(3))

    def test_path_conversion_invalid(self):
        self.assertRaises(IndexError, datastore_to_os_path, "invalid_ds_path")

    @parameterized.expand([
        ('[foo] a/b/c.vmdk', '/vmfs/volumes/foo/a/b/c.vmdk'),
        ('[foo] c.vmdk', '/vmfs/volumes/foo/c.vmdk'),
        ('[foo]a', '/vmfs/volumes/foo/a'),
        ('/vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk'),
        ('[]/vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk'),
        ('[] /vmfs/volumes/foo/bar.vmdk', '/vmfs/volumes/foo/bar.vmdk')
    ])
    def test_path_conversion(self, ds_path, expected_os_path):
        path = datastore_to_os_path(ds_path)
        assert_that(path, equal_to(expected_os_path))

    @parameterized.expand([
        (['[foo] images/a/b/c.vmdk'], True, False, False),
        (['[foo] vms/a/b/c.vmdk'], False, True, False),
        (['[foo] images/a/b/c.vmdk', '[foo] vms/a.vmdk'], False, True, False),
        (['[foo] disks/a/b/c.vmdk'], False, False, True),
        (['[foo] images/a/c.vmdk', '[foo] disks/a.vmdk'], False, False, True),
        ([], False, False, False)
    ])
    def test_is_what_disk(self, disk_files, image, ephemeral, persistent):
        assert_that(is_image(disk_files), equal_to(image))
        assert_that(is_ephemeral_disk(disk_files), equal_to(ephemeral))
        assert_that(is_persistent_disk(disk_files), equal_to(persistent))

    @parameterized.expand([
        ("ds1", "image_id",
         "/vmfs/volumes/ds1/images/im/image_id/image_id.manifest"),
        ("123 456", "image_id",
         "/vmfs/volumes/123 456/images/im/image_id/image_id.manifest"),
    ])
    def test_os_image_manifest_path(self, datastore, image_id, expected):
        assert_that(os_image_manifest_path(datastore, image_id),
                    equal_to(expected))

    def test_vmdk_uuid_conversion(self):
        for id in ['01234567-89ab-cedf-0123-456789abcdef',
                   '01 23456 789ABCEDF0123456789ABCDEF',
                   '01 23 45 67 89 ab ce df-01 23 45 67 89 ab cd ef',
                   '0123456789abcedf0123456789abcdef']:
            vmdk_uuid = uuid_to_vmdk_uuid(id)
            assert_that(
                vmdk_uuid,
                equal_to('01 23 45 67 89 ab ce df-01 23 45 67 89 ab cd ef'))
        for id in ['',
                   '01234567-89ab-cedf-0123-456789abcd',
                   '01 23456 789abcedf0123456789abcdefabcd']:
            self.assertRaises(ValueError, uuid_to_vmdk_uuid, id)
Example #11
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory > 0, is_(True))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory > 0, is_(True))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus > 0, is_(True))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "vm_%s-%s-%s" % (
            time.strftime("%Y-%m-%d-%H%M%S", time.localtime()),
            str(random.randint(100000, 1000000)),
            suffix)

        datastore = self.vim_client.get_datastore().name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        folder = self.vim_client.vm_folder
        resource_pool = self.vim_client.root_resource_pool
        task = folder.CreateVm(create_spec, resource_pool, None)
        self.vim_client.wait_for_task(task)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOff))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException,
                          self.vim_client.get_vm_in_cache, "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec)
        self.vim_client.wait_for_task(task)

        # For a ReconfigVM task that removes a disk, hostd may mark the
        # task as successful before the VM status is updated. Thus when
        # wait_for_task returns, the vm_cache may still be in the old
        # state, though it eventually converges to a consistent state.
        # AFAIK this only happens for this task, and it should be fine
        # here because other operations rarely depend on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec)
        self.vim_client.wait_for_task(task)

        # Same as before when disk is added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1), "disk2 in " +
                                                     str(found_vms[0].disks))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until it disappears from the cache
        self._wait_vm_power_status(vm_id, PowerState.poweredOn)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOn))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """

        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

            def networks_updated(self):
                pass

            def virtual_machines_updated(self):
                pass

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

        mock_apply = MagicMock(wraps=self.vim_client._apply_ds_update)
        self.vim_client._apply_ds_update = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional datastores_updated notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = vim.vm.ConfigSpec(
            name=vm_id,
            guestId="otherGuest",
            memoryMB=64,
            numCPUs=2,
            files=vim.vm.FileInfo(vmPathName="[%s] /" % datastore),
            deviceChange=[],
        )
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        self.vm_config.add_device(create_spec, controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(create_spec, disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = vim.vm.ConfigSpec()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        controller = \
            self.vm_config._find_scsi_controller(update_spec,
                                                 vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(update_spec, disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = vim.vm.ConfigSpec()
        devices = self.vm_config.get_devices_from_config(vm_info.config)
        found_device = None
        for device in devices:
            if isinstance(device, vim.vm.device.VirtualDisk) and \
                    device.backing.fileName.endswith(disk_path):
                found_device = device
        self.vm_config.remove_device(remove_spec, found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.acquire_clone_ticket()
        vim_client2 = VimClient(host=self.host, ticket=ticket)
        vim_client2.host_system

    def test_http_ticket(self):
        datastore = self.vim_client.get_datastore().name
        filename = "%s.bin" % str(uuid.uuid4())
        quoted_dc_name = 'ha%252ddatacenter'
        url = 'https://%s/folder/%s?dcPath=%s&dsName=%s' % (
            self.host, filename, quoted_dc_name, datastore)

        ticket = self.vim_client.acquire_cgi_ticket(url, HttpOp.PUT)
        assert_that(ticket, is_not(equal_to(None)))

    def test_host_stats(self):
        """ Skip host stats test.
        This test does not agree with the contract exposed from
        the implementation.
        Until the vim_client code be refactor/cleanup, disable this test for
        now.
        """
        raise SkipTest()

        self.vim_client.initialize_host_counters()
        self.vim_client.update_hosts_stats()
        stats = self.vim_client.get_host_stats()
        assert_that(stats, has_key('mem.consumed'))
        assert_that(stats['mem.consumed'], greater_than(0))
        assert_that(stats, has_key('rescpu.actav1'))
        assert_that(stats['rescpu.actav1'], greater_than(0))

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)
Example #12
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(
            host=host, ticket=response.vim_ticket, auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Fail to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases have '*' as a placeholder
        # for the host name, which has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host if absent.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        vm_id = self._shadow_vm_id
        if self._vm_manager.has_vm(vm_id):
            self._logger.debug("shadow vm exists")
            return

        spec = self._vm_config.create_spec(
            vm_id=vm_id, datastore=self._get_shadow_vm_datastore(),
            memory=32, cpus=1)
        try:
            self._vm_manager.create_vm(vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" % vm_id)
            raise

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(self._shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.add_disk(spec, image_datastore, image_id, info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(self._shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore):
        """ Obtain a handle to the streamOptimized disk from shadow vm.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """

        self._ensure_shadow_vm()
        self._configure_shadow_vm_with_disk(image_id, image_datastore)
        lease, disk_url = self._export_shadow_vm()
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _create_import_vm_spec(self, image_id, datastore):
        vm_name = "h2h_%s" % str(uuid.uuid4())
        spec = self._vm_config.create_spec_for_import(vm_id=vm_name,
                                                      image_id=image_id,
                                                      datastore=datastore,
                                                      memory=32,
                                                      cpus=1)

        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec, datastore, None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client,
                                         image_id, destination_datastore,
                                         imported_vm_name, metadata, manifest):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(
            image_id=image_id,
            datastore_id=destination_datastore,
            transferred_image_id=imported_vm_name,
            metadata=metadata,
            manifest=manifest,
        )

        response = agent_client.receive_image(request)
        if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST:
            raise DiskAlreadyExistException(response.error)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    def _read_metadata(self, image_datastore, image_id):
        try:
            # Transfer raw manifest
            manifest_path = os_image_manifest_path(image_datastore, image_id)
            with open(manifest_path) as f:
                manifest = f.read()

            # Transfer raw metadata
            metadata_path = os_metadata_path(image_datastore, image_id,
                                             IMAGE_FOLDER_NAME)
            metadata = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = f.read()

            return manifest, metadata
        except Exception:
            self._logger.exception("Failed to read metadata")
            raise

    @lock_non_blocking
    def send_image_to_host(self, image_id, image_datastore,
                           destination_image_id, destination_datastore,
                           host, port, intermediate_file_path=None):
        manifest, metadata = self._read_metadata(image_datastore, image_id)

        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore)

        # Save stream-optimized disk to a unique path locally for now.
        # TODO(vui): Switch to chunked transfers to handle not knowing content
        # length in the full streaming mode.

        if intermediate_file_path:
            tmp_path = intermediate_file_path
        else:
            tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
                self._get_shadow_vm_datastore(),
                self._shadow_vm_id)
        try:
            self.download_file(disk_url, tmp_path)
        finally:
            read_lease.Complete()

        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(
            destination_image_id, destination_datastore)

        agent_client, vim_client = self._get_remote_connections(host, port)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(vim_client,
                                                                 spec)
            try:
                self.upload_file(tmp_path, disk_url)
            finally:
                write_lease.Complete()
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

            # TODO(vui): imported vm name should be made unique to remove
            # ambiguity during subsequent lookup
            imported_vm_name = destination_image_id

            self._register_imported_image_at_host(
                agent_client, destination_image_id, destination_datastore,
                imported_vm_name, metadata, manifest)

        finally:
            agent_client.close()
            vim_client.disconnect()

        return imported_vm_name
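
A minimal usage sketch for the transferer above. The connected vim_client, the datastore names, image id, remote host address, and agent port are illustrative assumptions, not values taken from this example:

# Assumes a VimClient already connected to the local ESX host.
transferer = HttpNfcTransferer(vim_client,
                               image_datastores=["datastore1"],
                               host_name="10.0.0.4")

# Streams the image's VMDK through the shadow VM, uploads it to the remote
# host via ImportVApp, and registers it there; all values are illustrative.
imported_vm_name = transferer.send_image_to_host(
    image_id="ttylinux",
    image_datastore="datastore1",
    destination_image_id=None,          # None falls back to image_id
    destination_datastore="datastore2",
    host="10.0.0.5",                    # remote agent address (illustrative)
    port=8835)                          # agent port (illustrative)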
Example No. 13
0
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host,
                               ticket=response.vim_ticket,
                               auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Fail to initialize nfc lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases have '*' as a placeholder
        # for the host name, which has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host if absent.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        vm_id = self._shadow_vm_id
        if self._vm_manager.has_vm(vm_id):
            self._logger.debug("shadow vm exists")
            return

        spec = self._vm_config.create_spec(
            vm_id=vm_id,
            datastore=self._get_shadow_vm_datastore(),
            memory=32,
            cpus=1)
        try:
            self._vm_manager.create_vm(vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" % vm_id)
            raise

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(self._shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.add_disk(spec,
                                      image_datastore,
                                      image_id,
                                      info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(self._shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore):
        """ Obtain a handle to the streamOptimized disk from shadow vm.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """

        self._ensure_shadow_vm()
        self._configure_shadow_vm_with_disk(image_id, image_datastore)
        lease, disk_url = self._export_shadow_vm()
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _create_import_vm_spec(self, image_id, datastore):
        vm_name = "h2h_%s" % str(uuid.uuid4())
        spec = self._vm_config.create_spec_for_import(vm_id=vm_name,
                                                      image_id=image_id,
                                                      datastore=datastore,
                                                      memory=32,
                                                      cpus=1)

        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec,
                                                  datastore,
                                                  None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client, image_id,
                                         destination_datastore,
                                         imported_vm_name):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(image_id=image_id,
                                      datastore_id=destination_datastore,
                                      transferred_image_id=imported_vm_name)

        response = agent_client.receive_image(request)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    @lock_non_blocking
    def send_image_to_host(self,
                           image_id,
                           image_datastore,
                           destination_image_id,
                           destination_datastore,
                           host,
                           port,
                           intermediate_file_path=None):
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore)

        # Save stream-optimized disk to a unique path locally for now.
        # TODO(vui): Switch to chunked transfers to handle not knowing content
        # length in the full streaming mode.

        if intermediate_file_path:
            tmp_path = intermediate_file_path
        else:
            tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
                self._get_shadow_vm_datastore(), self._shadow_vm_id)
        try:
            self.download_file(disk_url, tmp_path)
        finally:
            read_lease.Complete()

        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(destination_image_id,
                                           destination_datastore)

        agent_client, vim_client = self._get_remote_connections(host, port)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(
                vim_client, spec)
            try:
                self.upload_file(tmp_path, disk_url)
            finally:
                write_lease.Complete()
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

            # TODO(vui): imported vm name should be made unique to remove
            # ambiguity during subsequent lookup
            imported_vm_name = destination_image_id

            self._register_imported_image_at_host(agent_client,
                                                  destination_image_id,
                                                  destination_datastore,
                                                  imported_vm_name)

        finally:
            agent_client.close()
            vim_client.disconnect()

        return imported_vm_name
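
Both variants of the class rely on _ensure_host_in_url to patch the '*' placeholder that lease URLs carry in place of a real host name. A self-contained sketch of that substitution, using the standard urlparse module instead of the transferer's private _split_url helper (the sample URL and host are made up):

from urlparse import urlsplit, urlunsplit


def ensure_host_in_url(url, actual_host):
    # Lease URLs use '*' as the network location; replace that placeholder
    # with the actual host name, leaving the rest of the URL untouched.
    parts = urlsplit(url)
    netloc = parts.netloc.replace("*", actual_host)
    return urlunsplit((parts.scheme, netloc, parts.path,
                       parts.query, parts.fragment))


# ensure_host_in_url("https://*/nfc/52d0-disk-0.vmdk", "10.0.0.5")
# -> "https://10.0.0.5/nfc/52d0-disk-0.vmdk"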
Example No. 14
0
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory > 0, is_(True))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory > 0, is_(True))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus > 0, is_(True))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "%s-%s-%s" % (
            time.strftime("%Y-%m-%d-%H%M%S", time.localtime()),
            str(random.randint(100000, 1000000)),
            suffix)

        datastore = self.vim_client.get_datastore().name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        folder = self.vim_client.vm_folder
        resource_pool = self.vim_client.root_resource_pool
        task = folder.CreateVm(create_spec, resource_pool, None)
        self.vim_client.wait_for_task(task)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOff))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException,
                          self.vim_client.get_vm_in_cache, "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec)
        self.vim_client.wait_for_task(task)

        # For a ReconfigVM task that changes disks, hostd may mark the task
        # as successful before it updates the VM state. Thus, when
        # wait_for_task returns, the vm_cache may still be in the old state,
        # although it eventually converges to a consistent one. As far as we
        # know this only happens for this task, and it is acceptable here
        # because other operations rarely depend on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec)
        self.vim_client.wait_for_task(task)

        # Same caveat as above, from when the disk was added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1), "disk2 in " +
                                                     str(found_vms[0].disks))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until the new power state shows up in the cache
        self._wait_vm_power_status(vm_id, PowerState.poweredOn)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOn))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """

        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

            def networks_updated(self):
                pass

            def virtual_machines_updated(self):
                pass

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

        mock_apply = MagicMock(wraps=self.vim_client._apply_ds_update)
        self.vim_client._apply_ds_update = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional datastores_updated notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = vim.vm.ConfigSpec(
            name=vm_id,
            guestId="otherGuest",
            memoryMB=64,
            numCPUs=2,
            files=vim.vm.FileInfo(vmPathName="[%s] /" % datastore),
            deviceChange=[],
        )
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        self.vm_config.add_device(create_spec, controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(create_spec, disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = vim.vm.ConfigSpec()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        controller = \
            self.vm_config._find_scsi_controller(update_spec,
                                                 vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(update_spec, disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = vim.vm.ConfigSpec()
        devices = self.vm_config.get_devices_from_config(vm_info.config)
        found_device = None
        for device in devices:
            if isinstance(device, vim.vm.device.VirtualDisk) and \
                    device.backing.fileName.endswith(disk_path):
                found_device = device
        self.vm_config.remove_device(remove_spec, found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.acquire_clone_ticket()
        vim_client2 = VimClient(host=self.host, ticket=ticket)
        # Accessing host_system exercises the new connection and verifies
        # that the cloned ticket is actually usable.
        vim_client2.host_system

    def test_http_ticket(self):
        datastore = self.vim_client.get_datastore().name
        filename = "%s.bin" % str(uuid.uuid4())
        quoted_dc_name = 'ha%252ddatacenter'
        url = 'https://%s/folder/%s?dcPath=%s&dsName=%s' % (
            self.host, filename, quoted_dc_name, datastore)

        ticket = self.vim_client.acquire_cgi_ticket(url, HttpOp.PUT)
        assert_that(ticket, is_not(equal_to(None)))

    def test_host_stats(self):
        """ Skip host stats test.
        This test does not agree with the contract exposed from
        the implementation.
        Until the vim_client code be refactor/cleanup, disable this test for
        now.
        """
        raise SkipTest()

        self.vim_client.initialize_host_counters()
        self.vim_client.update_hosts_stats()
        stats = self.vim_client.get_host_stats()
        assert_that(stats, has_key('mem.consumed'))
        assert_that(stats['mem.consumed'], greater_than(0))
        assert_that(stats, has_key('rescpu.actav1'))
        assert_that(stats['rescpu.actav1'], greater_than(0))

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)
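
The two _wait helpers above return silently if the cache never converges within the roughly five-second polling budget. A hedged sketch of an equivalent helper that fails loudly instead; the helper name and the exception type are illustrative and not part of the original test:

import time


class CacheSyncTimeout(Exception):
    """Illustrative exception; not part of the original test code."""
    pass


def wait_for_cache(predicate, retries=50, interval=0.1):
    # Polls the same way _wait_vm_has_disk/_wait_vm_power_status do, but
    # raises if the condition never becomes true within the budget.
    start = time.time()
    for _ in xrange(retries):
        if predicate():
            return time.time() - start
        time.sleep(interval)
    raise CacheSyncTimeout("cache did not sync within %.1f seconds" %
                           (retries * interval))


# Usage, assuming a connected vim_client and an existing vm_id:
# wait_for_cache(
#     lambda: len(vim_client.get_vm_in_cache(vm_id).disks) == 2)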