Code Example #1
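This helper deletes a VM directly through the vim client: it connects with a VIM service ticket, powers the VM off if it is not already off, destroys it, and always disconnects in the finally block. In Code Example #3 below the same method serves as a fallback in clear() when the agent's own delete_vm call fails.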
def vim_delete_vm(self, vm_id):
    """ Delete a VM using the vim client """
    vim_client = None
    try:
        vim_client = VimClient()
        vim_client.connect_ticket(self.server, self._get_vim_ticket())
        vim_vm = vim_client.get_vm(vm_id)
        if vim_vm.runtime.powerState != 'poweredOff':
            try:
                vim_task = vim_vm.PowerOff()
                vim_client.wait_for_task(vim_task)
            except Exception:
                logger.info("Cannot power off vm", exc_info=True)
        vim_task = vim_vm.Destroy()
        vim_client.wait_for_task(vim_task)
    finally:
        # vim_client stays None if VimClient() itself failed.
        if vim_client:
            vim_client.disconnect()
Code Example #2
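Code Example #2 is an integration suite that runs VimClient against a live ESX host: it checks the memory and CPU stat properties, exercises the VM cache through create, reconfigure, power, and destroy operations, verifies update-listener behavior, and acquires clone and CGI tickets. The suite is skipped unless nose-testconfig supplies a host_remote_test section; a minimal sketch of that config (key names taken from setUp below, placeholder values) might look like:

[host_remote_test]
server = <esx host address>
esx_pwd = <esx root password>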
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory, greater_than(0))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory, greater_than(0))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus, greater_than(0))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "vm_%s-%s-%s" % (
            time.strftime("%Y-%m-%d-%H%M%S", time.localtime()),
            str(random.randint(100000, 1000000)),
            suffix)

        datastore = self.vim_client.get_datastore().name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        folder = self.vim_client.vm_folder
        resource_pool = self.vim_client.root_resource_pool
        task = folder.CreateVm(create_spec, resource_pool, None)
        self.vim_client.wait_for_task(task)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOff))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException,
                          self.vim_client.get_vm_in_cache, "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec)
        self.vim_client.wait_for_task(task)

        # For a ReconfigVM task that adds or removes a disk, hostd may update
        # the task status to success before updating the VM status. Thus when
        # wait_for_task returns, the vm_cache may still be in the old state,
        # though it eventually converges. AFAIK this only happens for this
        # task, and it is acceptable here because hardly any other operation
        # depends on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec)
        self.vim_client.wait_for_task(task)

        # Same as before when disk is added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1), "disk2 in " +
                                                     str(found_vms[0].disks))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until the power state change shows up in the cache
        self._wait_vm_power_status(vm_id, PowerState.poweredOn)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOn))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """

        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

            def networks_updated(self):
                pass

            def virtual_machines_updated(self):
                pass

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

        mock_apply = MagicMock(wraps=self.vim_client._apply_ds_update)
        self.vim_client._apply_ds_update = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional datastore updated notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = vim.vm.ConfigSpec(
            name=vm_id,
            guestId="otherGuest",
            memoryMB=64,
            numCPUs=2,
            files=vim.vm.FileInfo(vmPathName="[%s] /" % datastore),
            deviceChange=[],
        )
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        self.vm_config.add_device(create_spec, controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(create_spec, disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = vim.vm.ConfigSpec()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        controller = \
            self.vm_config._find_scsi_controller(update_spec,
                                                 vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(update_spec, disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = vim.vm.ConfigSpec()
        devices = self.vm_config.get_devices_from_config(vm_info.config)
        found_device = None
        for device in devices:
            if isinstance(device, vim.vm.device.VirtualDisk) and \
                    device.backing.fileName.endswith(disk_path):
                found_device = device
        self.vm_config.remove_device(remove_spec, found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.acquire_clone_ticket()
        vim_client2 = VimClient(host=self.host, ticket=ticket)
        # Accessing host_system raises if the cloned ticket did not yield a
        # working session.
        vim_client2.host_system

    def test_http_ticket(self):
        datastore = self.vim_client.get_datastore().name
        filename = "%s.bin" % str(uuid.uuid4())
        quoted_dc_name = 'ha%252ddatacenter'
        url = 'https://%s/folder/%s?dcPath=%s&dsName=%s' % (
            self.host, filename, quoted_dc_name, datastore)

        ticket = self.vim_client.acquire_cgi_ticket(url, HttpOp.PUT)
        assert_that(ticket, is_not(equal_to(None)))

    def test_host_stats(self):
        """ Skip host stats test.
        This test does not agree with the contract exposed from
        the implementation.
        Until the vim_client code be refactor/cleanup, disable this test for
        now.
        """
        raise SkipTest()

        self.vim_client.initialize_host_counters()
        self.vim_client.update_hosts_stats()
        stats = self.vim_client.get_host_stats()
        assert_that(stats, has_key('mem.consumed'))
        assert_that(stats['mem.consumed'], greater_than(0))
        assert_that(stats, has_key('rescpu.actav1'))
        assert_that(stats['rescpu.actav1'], greater_than(0))

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)
Code Example #3
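Code Example #3 is a remote-agent integration suite: it provisions the agent through the control client (restarting it when the config changes), then exercises image copy/transfer/finalize, ISO attach and detach, placement constraints, NFC and VIM service tickets, and concurrent VM and image operations. It is skipped unless nose-testconfig supplies an agent_remote_test section; a minimal sketch (key names taken from setUp below, placeholder values, iso entries optional) might look like:

[agent_remote_test]
server = <esx host address>
datastores = datastore1, datastore2
iso_file = [datastore1] isos/test.iso
second_iso_file = [datastore1] isos/test2.iso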
class TestRemoteAgent(unittest.TestCase, AgentCommonTests):
    def shortDescription(self):
        return None

    def get_service_instance(self):
        # create a connection to hostd.
        request = ServiceTicketRequest(ServiceType.VIM)
        response = self.host_client.get_service_ticket(request)
        self.assertEqual(response.result, ServiceTicketResultCode.OK)

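        # Open a SOAP connection to hostd's vim endpoint and adopt the
        # session granted by the VIM service ticket.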
        hostd_port = 443
        vim_namespace = "vim25/5.0"
        stub = SoapStubAdapter(self.server, hostd_port, vim_namespace)
        si = vim.ServiceInstance("ServiceInstance", stub)
        si.RetrieveContent().sessionManager.CloneSession(response.vim_ticket)
        connect.SetSi(si)
        return si

    def connect_client(self, service, cls, server):
        """ Utility method to connect to a remote agent """
        max_sleep_time = 32
        sleep_time = 0.1
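        # Retry with exponential backoff (0.1s, 0.2s, 0.4s, ...) until the
        # delay would reach max_sleep_time.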
        while sleep_time < max_sleep_time:
            try:
                client = DirectClient(service, cls, server, 8835)
                client.connect()
                return client
            except TTransport.TTransportException:
                time.sleep(sleep_time)
                sleep_time *= 2
        self.fail("Cannot connect to agent %s" % server)

    def create_client(self):
        return self.connect_client("Host", Host.Client, self.server)

    def client_connections(self):
        self.host_client = self.create_client()
        self.control_client = self.connect_client("AgentControl", AgentControl.Client, self.server)

    def provision_hosts(self, mem_overcommit=2.0,
                        datastores=None, used_for_vms=True,
                        image_ds=None, host_id=None,
                        deployment_id="test-deployment"):
        """ Provisions the agents on the remote hosts """
        if datastores is None:
            datastores = self.get_all_datastores()
            image_datastore = self.get_image_datastore()
        elif image_ds:
            image_datastore = image_ds
        else:
            image_datastore = datastores[0]

        req = ProvisionRequest()
        req.datastores = datastores
        req.address = ServerAddress(host=self.server, port=8835)
        req.memory_overcommit = mem_overcommit
        req.image_datastore_info = ImageDatastore(
            name=image_datastore,
            used_for_vms=used_for_vms)
        req.image_datastores = set([req.image_datastore_info])
        req.management_only = True
        if host_id:
            req.host_id = host_id
        else:
            req.host_id = self.host_id

        if deployment_id:
            req.deployment_id = deployment_id
        else:
            req.deployment_id = self.deployment_id

        res = self.control_client.provision(req)

        # This will trigger a restart if the agent config changes, which
        # will happen the first time provision_hosts is called.
        self.assertEqual(res.result, ProvisionResultCode.OK)

        # Wait for up to 60 seconds for the agent to reboot.
        count = 0
        while count < 60:
            try:
                res = self.control_client.get_agent_status()
                if res.status == AgentStatusCode.OK:
                    # Agent is up
                    return
            except Exception:
                logger.exception("Can't connect to agent")
            count += 1
            time.sleep(1)
            # Reconnect the clients
            self._close_agent_connections()
            self.client_connections()
        self.fail("Cannot connect to agent %s after provisioning" % self.server)
        return host_id

    def setUp(self):
        from testconfig import config
        if "agent_remote_test" not in config:
            raise SkipTest()

        # Set the default datastore name
        self._datastores = None

        if "datastores" in config["agent_remote_test"]:
            datastores = config["agent_remote_test"]["datastores"]
            self._datastores = [d.strip() for d in datastores.split(",")]
        else:
            self.fail("datastores not provided for test setUp")

        # Optionally update the specification of a remote iso file. The file
        # needs to exist on the remote esx server for this test to succeed.
        self._remote_iso_file = None
        self._second_remote_iso_file = None
        if ("iso_file" in config["agent_remote_test"]):
            self._remote_iso_file = config["agent_remote_test"]["iso_file"]

        if ("second_iso_file" in config["agent_remote_test"]):
            self._second_remote_iso_file = config["agent_remote_test"]["second_iso_file"]

        server = config["agent_remote_test"]["server"]
        self.server = server

        self.generation = int(time.time())

        # Connect to server and configure vim_client
        self.client_connections()
        self.vim_client = VimClient()
        self.vim_client.connect_ticket(self.server, self._get_vim_ticket())
        connect.SetSi(self.vim_client._si)

        # Set host mode to normal
        self.set_host_mode(HostMode.NORMAL)

        # The first time setup is called the agent will restart.
        self.provision_hosts()
        # Reconnect to account for the restart
        self.client_connections()
        self.clear()

    @classmethod
    def setUpClass(cls):
        cls.host_id = str(uuid.uuid4())
        cls.deployment_id = "test-deployment"

    def _close_agent_connections(self):
        self.host_client.close()
        self.control_client.close()

    def tearDown(self):
        self._close_agent_connections()
        self.vim_client.disconnect()

    def vim_delete_vm(self, vm_id):
        """ Delete a VM using the vim client """
        vim_client = None
        try:
            vim_client = VimClient()
            vim_client.connect_ticket(self.server, self._get_vim_ticket())
            vim_vm = vim_client.get_vm(vm_id)
            if vim_vm.runtime.powerState != 'poweredOff':
                try:
                    vim_task = vim_vm.PowerOff()
                    vim_client.wait_for_task(vim_task)
                except Exception:
                    logger.info("Cannot power off vm", exc_info=True)
            vim_task = vim_vm.Destroy()
            vim_client.wait_for_task(vim_task)
        finally:
            # vim_client stays None if VimClient() itself failed.
            if vim_client:
                vim_client.disconnect()

    def clear(self):
        """Remove all the VMs, disks and images """
        request = GetResourcesRequest()
        response = rpc_call(self.host_client.get_resources, request)
        assert_that(response.result, is_(GetResourcesResultCode.OK))
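        # Try the agent's delete_vm first; if the VM is still powered on,
        # power it off and retry, falling back to the vim client as a last
        # resort.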
        for resource in response.resources:
            delete_request = Host.DeleteVmRequest(vm_id=resource.vm.id, force=True)
            response = rpc_call(self.host_client.delete_vm, delete_request)

            if response.result == DeleteVmResultCode.VM_NOT_POWERED_OFF:
                poweroff_request = Host.PowerVmOpRequest(vm_id=resource.vm.id,
                                                         op=Host.PowerVmOp.OFF)
                response = rpc_call(self.host_client.power_vm_op,
                                    poweroff_request)
                assert_that(response.result, is_(PowerVmOpResultCode.OK))
                response = rpc_call(self.host_client.delete_vm, delete_request)

            if response.result != DeleteVmResultCode.OK:
                logger.info("Cannot delete vm %s trying vim_client" % resource.vm.id)
                self.vim_delete_vm(resource.vm.id)
        self.clean_images()

    def clean_images(self):
        """ Clean up images if there are any """
        datastore = self._find_configured_datastore_in_host_config()
        request = Host.GetImagesRequest(datastore.id)
        response = self.host_client.get_images(request)
        if response.result == GetImagesResultCode.OK:
            for image_id in response.image_ids:
                if image_id == "ttylinux":
                    continue  # To be removed when we remove ttylinux.
                logging.info("Cleaning up stray image %s " % image_id)
                self._delete_image(Image(image_id, datastore))
        else:
            logger.warning("Failed to obtain the list of images to cleanup")

    def test_send_image_to_host(self):
        image_id = new_id() + "_test_xfer_image"
        image_id_2 = "%s_xfered" % image_id

        dst_image, _ = self._create_test_image(image_id)

        datastore = self._find_configured_datastore_in_host_config()
        transfer_image_request = TransferImageRequest(
            source_image_id=image_id,
            source_datastore_id=datastore.id,
            destination_host=ServerAddress(host="localhost", port=8835),
            destination_datastore_id=datastore.id,
            destination_image_id=image_id_2)
        res = self.host_client.transfer_image(transfer_image_request)
        self.assertEqual(res.result, TransferImageResultCode.OK)

        # clean up images created in test
        self._delete_image(dst_image)
        xfered_image = Image(image_id_2, datastore)
        self._delete_image(xfered_image)

    def test_host_config_after_provision(self):
        """
        Test if the agent returns the correct HostConfig
        after being provisioned
        """
        host_config_request = Host.GetConfigRequest()
        res = self.host_client.get_host_config(host_config_request)
        self.assertEqual(res.result, GetConfigResultCode.OK)

        hostConfig = res.hostConfig
        datastores = [ds.name for ds in hostConfig.datastores]
        containsDs = [ds for ds in self.get_all_datastores()
                      if ds in datastores]
        self.assertEqual(containsDs, self.get_all_datastores())
        networks = [net.id for net in hostConfig.networks]
        self.assertEqual(networks, self.vim_client.get_networks())
        self.assertEqual(hostConfig.address, ServerAddress(host=self.server,
                                                           port=8835))
        self.assertTrue(hostConfig.management_only)
        # get_host_config reports the datastore id for the image datastore
        # even if it was provisioned with a datastore name.
        image_datastore_name = self.get_image_datastore()
        image_datastore_id = None
        for ds in hostConfig.datastores:
            if ds.name == image_datastore_name:
                image_datastore_id = ds.id
        self.assertEqual(list(hostConfig.image_datastore_ids)[0],
                         image_datastore_id)

    def _generate_new_iso_ds_path(self):
        if (self._remote_iso_file.lower().rfind(".iso") !=
                len(self._remote_iso_file) - 4):
            raise ValueError("remote iso file must end in .iso")

        return "%s-%s.iso" % (self._remote_iso_file[:-4], str(uuid.uuid4()))

    def _make_new_iso_copy(self, file_manager, new_iso_path):
        copy_task = file_manager.CopyFile(self._remote_iso_file, None,
                                          new_iso_path, None)
        task.WaitForTask(copy_task)

    def test_attach_cdrom(self):
        """
        Tests attach iso code path.
        1. Attach an iso to a non existent VM. Check correct error
        2. Attach a non existent iso file to a valid VM. Check correct error
        3. Attach a real iso if specified to a VM. Verify it succeeds.
        Test should pass the iso path as [datastore_name]/path/to/iso.iso
        """

        if not self._remote_iso_file:
            raise SkipTest("ISO file on server not provided")

        si = self.get_service_instance()
        file_manager = si.RetrieveContent().fileManager
        iso_path = self._generate_new_iso_ds_path()
        iso_path_2 = self._generate_new_iso_ds_path()

        vm_wrapper = VmWrapper(self.host_client)
        image = DiskImage("ttylinux", CloneType.COPY_ON_WRITE)
        disks = [
            Disk(new_id(), "default", False, True, image=image, capacity_gb=1,
                 flavor_info=self.DEFAULT_DISK_FLAVOR),
        ]

        # Create disk and VM.
        reservation = vm_wrapper.place_and_reserve(vm_disks=disks).reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_id = vm_wrapper.create(request=request).vm.id

        # Verify the result when the VM is not found.
        fake_id = str(uuid.uuid4())
        vm_wrapper.attach_iso(fake_id, "/tmp/foo.iso",
                              Host.AttachISOResultCode.VM_NOT_FOUND)

        # Verify the result when the iso doesn't exist.
        vm_wrapper.attach_iso(vm_id, "/tmp/foo.iso",
                              Host.AttachISOResultCode.SYSTEM_ERROR)

        self._make_new_iso_copy(file_manager, iso_path)
        self._make_new_iso_copy(file_manager, iso_path_2)

        # Doing enough attaches will indirectly verify that we do not grow the
        # device list on reattach.
        for i in xrange(3):
            # verify attach works
            vm_wrapper.attach_iso(vm_id, iso_path)
            # verify re-attach to another iso works
            vm_wrapper.attach_iso(vm_id, iso_path_2)

        vm_wrapper.power(Host.PowerVmOp.ON)
        # Verify reattach fails when vm is powered on.
        vm_wrapper.attach_iso(vm_id, iso_path,
                              Host.AttachISOResultCode.ISO_ATTACHED_ERROR)
        vm_wrapper.power(Host.PowerVmOp.OFF)

        vm_wrapper.detach_iso(vm_id, True)
        vm_wrapper.attach_iso(vm_id, iso_path)
        vm_wrapper.detach_iso(vm_id, True)

        self.clear()

    def test_detach_cdrom_failure(self):
        """ Tests failures of detach iso from VM. """
        vm_wrapper = VmWrapper(self.host_client)
        reservation = vm_wrapper.place_and_reserve().reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_id = vm_wrapper.create(request=request).vm.id

        # no prior attach of iso
        vm_wrapper.detach_iso(vm_id, True,
                              Host.DetachISOResultCode.ISO_NOT_ATTACHED)

        # nonexistent VM id
        fake_id = str(uuid.uuid4())
        vm_wrapper.detach_iso(fake_id, True,
                              Host.DetachISOResultCode.VM_NOT_FOUND)

        # Attaching a nonexistent iso path should still succeed as long
        # as a valid datastore path format is used
        random = str(uuid.uuid4())
        vm_wrapper.attach_iso(vm_id, "[] /tmp/%s_nonexistent_.iso" % random,
                              Host.AttachISOResultCode.OK)
        # Not supporting detach without delete yet.
        vm_wrapper.detach_iso(vm_id, False,
                              Host.DetachISOResultCode.SYSTEM_ERROR)
        # But detaching a nonexistent iso with delete should work.
        vm_wrapper.detach_iso(vm_id, True,
                              Host.DetachISOResultCode.OK)

        vm_wrapper.delete(request=vm_wrapper.delete_request())

    def test_detach_cdrom(self):
        """
        Tests detach iso from VM.
        Verify Detaching a real iso from a VM.
        """
        if not self._remote_iso_file:
            raise SkipTest("ISO file on server not provided")

        si = self.get_service_instance()
        file_manager = si.RetrieveContent().fileManager
        iso_path = self._generate_new_iso_ds_path()
        self._make_new_iso_copy(file_manager, iso_path)
        iso_path_2 = self._generate_new_iso_ds_path()
        self._make_new_iso_copy(file_manager, iso_path_2)

        vm_wrapper = VmWrapper(self.host_client)
        reservation = vm_wrapper.place_and_reserve().reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_id = vm_wrapper.create(request=request).vm.id

        vm_wrapper.attach_iso(vm_id, iso_path,
                              Host.AttachISOResultCode.OK)
        vm_wrapper.detach_iso(vm_id, True,
                              Host.DetachISOResultCode.OK)

        vm_wrapper.attach_iso(vm_id, iso_path_2,
                              Host.AttachISOResultCode.OK)
        # verify detach works when powered on
        vm_wrapper.power(Host.PowerVmOp.ON)
        vm_wrapper.detach_iso(vm_id, True,
                              Host.DetachISOResultCode.OK)
        vm_wrapper.power(Host.PowerVmOp.OFF)

        vm_wrapper.delete(request=vm_wrapper.delete_request())

    def test_remote_boostrap(self):
        """ Tests boostrapping of an agent against a real host """
        # We need to be able to read the config from the host to set it
        # correctly.
        # https://www.pivotaltracker.com/story/show/83243144
        raise SkipTest()
        req = self._update_agent_config()

        # Try connecting to the client in a loop.

        # Back off on failure to connect to agent
        max_sleep_time = 32
        sleep_time = 0.1
        while sleep_time < max_sleep_time:
            try:
                self.host_client.connect()
                break
            except TTransport.TTransportException:
                time.sleep(sleep_time)
                sleep_time *= 2

        self._validate_post_boostrap_config(req)

    def test_get_nfc_ticket_with_ds_id(self):
        datastores = self.vim_client.get_all_datastores()
        image_datastore = [ds for ds in datastores
                           if ds.name == self.get_image_datastore()][0]

        request = ServiceTicketRequest(service_type=ServiceType.NFC,
                                       datastore_name=image_datastore.id)
        response = self.host_client.get_service_ticket(request)
        assert_that(response.result, is_(ServiceTicketResultCode.OK))

        ticket = response.ticket
        assert_that(ticket, not_none())
        assert_that(ticket.port, is_(902))
        assert_that(ticket.service_type, is_("nfc"))
        assert_that(ticket.session_id, not_none())
        assert_that(ticket.ssl_thumbprint, not_none())

    def test_persist_mode(self):
        # Enter maintenance
        self.set_host_mode(HostMode.MAINTENANCE)

        # Restart agent by provisioning with different configuration
        self.provision_hosts(mem_overcommit=2.1)
        self.client_connections()

        # Check mode. It should still be MAINTENANCE.
        response = self.host_client.get_host_mode(GetHostModeRequest())
        assert_that(response.result, equal_to(GetHostModeResultCode.OK))
        assert_that(response.mode, equal_to(HostMode.MAINTENANCE))

    def _create_test_image(self, name):
        """ Create an test image for tests to use on a datastore """
        datastore = self._find_configured_datastore_in_host_config()

        # ttylinux is the default image that is copied to datastore
        # when agent starts
        src_image = Image("ttylinux", datastore)
        dst_image = Image(name, datastore)

        # Copy image
        request = Host.CopyImageRequest(src_image, dst_image)
        response = self.host_client.copy_image(request)
        assert_that(response.result, is_in([CopyImageResultCode.OK, CopyImageResultCode.DESTINATION_ALREADY_EXIST]))

        return dst_image, datastore

    def _get_vim_ticket(self):
        request = ServiceTicketRequest(ServiceType.VIM)
        response = self.host_client.get_service_ticket(request)
        assert_that(response.result, is_(ServiceTicketResultCode.OK))
        return response.vim_ticket

    def test_create_vm_with_ephemeral_disks_concurrent(self):
        concurrency = 5
        atomic_lock = threading.Lock()
        results = {"count": 0}

        def _thread():
            self._test_create_vm_with_ephemeral_disks("ttylinux",
                                                      concurrent=True,
                                                      new_client=True)
            with atomic_lock:
                results["count"] += 1

        threads = []
        for i in range(concurrency):
            thread = threading.Thread(target=_thread)
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        assert_that(results["count"], is_(concurrency))

    def test_concurrent_copy_image(self):
        concurrency = 3
        atomic_lock = threading.Lock()
        results = {"ok": 0, "existed": 0}

        datastore = self._find_configured_datastore_in_host_config()
        new_image_id = "concurrent-copy-%s" % str(uuid.uuid4())

        src_image = Image("ttylinux", datastore)
        dst_image = Image(new_image_id, datastore)

        # verify destination_id is not in datastore
        request = Host.GetImagesRequest(datastore.id)
        response = self.host_client.get_images(request)
        assert_that(response.result, is_(GetImagesResultCode.OK))
        assert_that(response.image_ids, has_item("ttylinux"))
        assert_that(response.image_ids, is_not(has_item(new_image_id)))
        image_number = len(response.image_ids)

        def _thread():
            client = self.create_client()
            request = Host.CopyImageRequest(src_image, dst_image)
            response = client.copy_image(request)
            ok = response.result == CopyImageResultCode.OK
            existed = (response.result ==
                       CopyImageResultCode.DESTINATION_ALREADY_EXIST)

            # Verify destination_id is in datastore
            request = Host.GetImagesRequest(datastore.id)
            response = client.get_images(request)
            assert_that(response.result, is_(GetImagesResultCode.OK))
            assert_that(response.image_ids, has_item("ttylinux"))
            assert_that(response.image_ids, has_item(new_image_id))
            assert_that(response.image_ids, has_length(image_number + 1))
            with atomic_lock:
                if ok:
                    results["ok"] += 1
                if existed:
                    results["existed"] += 1

        threads = []
        for i in range(concurrency):
            thread = threading.Thread(target=_thread)
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        # Clean destination image
        self._delete_image(dst_image)

        # Only one copy is successful, all others return
        # DESTINATION_ALREADY_EXIST
        assert_that(results["ok"], is_(1))
        assert_that(results["existed"], is_(concurrency - 1))

    def test_force_delete_vm(self):
        vm_wrapper = VmWrapper(self.host_client)

        # create a vm without disk
        reservation = vm_wrapper.place_and_reserve().reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_id = vm_wrapper.create(request=request).vm.id

        # create 2 disks
        disks = [
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR)
        ]

        for disk in disks:
            reservation = vm_wrapper.place_and_reserve(disk=disk).reservation
            vm_wrapper.create_disk(disk, reservation, validate=True)

        # attach disks
        disk_ids = [disk.id for disk in disks]
        vm_wrapper.attach_disks(vm_id, disk_ids)

        # delete vm fails without force
        vm_wrapper.delete(request=vm_wrapper.delete_request(),
                          expect=Host.DeleteVmResultCode.OPERATION_NOT_ALLOWED)

        # delete vm with force succeeds
        vm_wrapper.delete(request=vm_wrapper.delete_request(force=True))
        for disk_id in disk_ids:
            vm_wrapper.get_disk(disk_id, expect_found=False)

    def test_disk_uuids(self):
        # Create a VM with a linked-clone root disk and a blank ephemeral
        # disk, then attach a persistent disk. Verify that only the uuids of
        # the ephemeral and persistent disks are updated to match their
        # cloud ids.

        vm_wrapper = VmWrapper(self.host_client)

        disk_id_root = new_id()
        disk_id_ephemeral = new_id()
        disk_id_persistent = new_id()

        image = DiskImage("ttylinux", CloneType.COPY_ON_WRITE)
        disks = [
            Disk(disk_id_root, self.DEFAULT_DISK_FLAVOR.name, False, True,
                 image=image,
                 capacity_gb=0, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(disk_id_ephemeral, self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR),
        ]

        reservation = vm_wrapper.place_and_reserve(vm_disks=disks).reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_id = vm_wrapper.create(request=request).vm.id

        # create one persistent disk
        disk = Disk(disk_id_persistent, self.DEFAULT_DISK_FLAVOR.name, True, True,
                    capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR)
        reservation = vm_wrapper.place_and_reserve(disk=disk).reservation
        vm_wrapper.create_disk(disk, reservation, validate=True)
        vm_wrapper.attach_disks(vm_id, [disk_id_persistent])

        vim_vm = self.vim_client.get_vm(vm_id)

        disk_uuid_map = dict([(dev.backing.uuid, dev.backing.fileName)
                              for dev in vim_vm.config.hardware.device
                              if isinstance(dev, vim.vm.device.VirtualDisk)])

        # Assert that the UUID assigned to the ephemeral and persistent disks
        # matches their ids
        for disk_id in (disk_id_ephemeral, disk_id_persistent):
            self.assertTrue(disk_id in disk_uuid_map and
                            disk_id in disk_uuid_map[disk_id])
        # Assert that no such assignment is done for link-clone root disk.
        self.assertFalse(disk_id_root in disk_uuid_map)

        vm_wrapper.detach_disks(vm_id, [disk_id_persistent])
        vm_wrapper.delete(request=vm_wrapper.delete_request())
        vm_wrapper.delete_disks([disk_id_persistent], validate=True)

    def test_place_on_multiple_datastores(self):
        """ Test placement can actually place vm to datastores without image.
        """
        host_datastores = self.vim_client.get_all_datastores()
        image_datastore = self._find_configured_datastore_in_host_config()
        dest_datastore = None

        for ds in host_datastores:
            if ds.id != image_datastore.id:
                dest_datastore = ds
                break

        if not dest_datastore:
            raise SkipTest()

        # Test only 2 datastores, with one image datastore and another
        # datastore.
        self.provision_hosts(datastores=[image_datastore.name,
                                         dest_datastore.name],
                             used_for_vms=False)
        self.client_connections()

        concurrency = 3
        atomic_lock = threading.Lock()
        results = {"count": 0}

        # Only copy image to datastore[0]
        new_image_id = str(uuid.uuid4())
        datastore = Datastore(id=image_datastore.name)
        src_image = Image("ttylinux", datastore)
        dst_image = Image(new_image_id, datastore)
        request = Host.CopyImageRequest(src_image, dst_image)
        response = self.host_client.copy_image(request)
        assert_that(response.result, is_(CopyImageResultCode.OK))

        def _thread():
            self._test_create_vm_with_ephemeral_disks(new_image_id,
                                                      concurrent=True,
                                                      new_client=True)
            with atomic_lock:
                results["count"] += 1

        threads = []
        for i in range(concurrency):
            thread = threading.Thread(target=_thread)
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        # Make sure the new image is copied to both datastores, and clean
        # them up.
        for ds in (image_datastore, dest_datastore):
            image = Image(datastore=Datastore(id=ds.name), id=new_image_id)
            self._delete_image(image)

        assert_that(results["count"], is_(concurrency))

    def test_place_on_datastore_tag(self):
        host_config_request = Host.GetConfigRequest()
        res = self.host_client.get_host_config(host_config_request)
        self.assertEqual(res.result, GetConfigResultCode.OK)

        datastores = res.hostConfig.datastores
        for datastore in datastores:
            tag = self._type_to_tag(datastore.type)
            if not tag:
                continue

            vm_wrapper = VmWrapper(self.host_client)

            # Test place disks with only datastore constraint
            disk = Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True, capacity_gb=0)
            resource_constraints = self._create_constraints([datastore.id], [])
            vm_wrapper.place(vm_disks=[disk], vm_constraints=resource_constraints)

            # Test place disks with datastore and datastore tag constraint
            disk = Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True, capacity_gb=0)
            resource_constraints = self._create_constraints([datastore.id], [tag])
            vm_wrapper.place(vm_disks=[disk], vm_constraints=resource_constraints, expect=PlaceResultCode.OK)

            # Test place disks with the wrong datastore tag
            for other_tag in self._other_tags(tag):
                disk = Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True, capacity_gb=0)
                resource_constraints = self._create_constraints([datastore.id], [other_tag])
                vm_wrapper.place(vm_disks=[disk], vm_constraints=resource_constraints,
                                 expect=PlaceResultCode.NO_SUCH_RESOURCE)

    def test_provision_without_datastores(self):
        """
        Test that the host uses all the datastores when it gets provisioned
        without any datastores specified.
        """
        # provision the host without datastores
        datastores = self.get_all_datastores()
        self.provision_hosts(datastores=[], image_ds=datastores[0])

        # verify that the host configuration contains all the datastores.
        req = Host.GetConfigRequest()
        res = self.create_client().get_host_config(req)
        self.assertEqual(len(res.hostConfig.datastores),
                         len(self.vim_client.get_all_datastores()))

    def _manage_disk(self, op, **kwargs):
        task = op(self.vim_client._content.virtualDiskManager, **kwargs)
        self.vim_client.wait_for_task(task)

    def _gen_vd_spec(self):
        spec = vim.VirtualDiskManager.VirtualDiskSpec()
        spec.diskType = str(vim.VirtualDiskManager.VirtualDiskType.thin)
        spec.adapterType = str(vim.VirtualDiskManager.VirtualDiskAdapterType.lsiLogic)
        return spec

    def test_finalize_image(self):
        """ Integration test for atomic image create """
        img_id = "test-create-image"
        tmp_img_id = "-tmp-" + img_id
        tmp_image, ds = self._create_test_image(tmp_img_id)
        tmp_image_path = datastore_path(ds.name, "image_" + tmp_img_id)
        src_vmdk = vmdk_path(ds.id, tmp_img_id, IMAGE_FOLDER_NAME_PREFIX)
        dst_vmdk = "%s/%s.vmdk" % (tmp_image_path, img_id)

        try:
            self._manage_disk(
                vim.VirtualDiskManager.MoveVirtualDisk_Task,
                sourceName=src_vmdk, destName=dst_vmdk, force=True)
        except Exception:
            logger.error("Error moving vmdk %s" % src_vmdk,
                         exc_info=True)
            self._manage_disk(
                vim.VirtualDiskManager.DeleteVirtualDisk_Task,
                name=src_vmdk)
            raise
        dst_image = Image(img_id, ds)
        req = FinalizeImageRequest(image_id=img_id,
                                   datastore=ds.id,
                                   tmp_image_path=tmp_image_path)
        response = self.host_client.finalize_image(req)
        self.assertEqual(response.result, FinalizeImageResultCode.OK)
        request = Host.GetImagesRequest(ds.id)
        response = self.host_client.get_images(request)
        assert_that(response.result, is_(GetImagesResultCode.OK))
        assert_that(response.image_ids, has_item(img_id))

        # Issue another finalize call and it should fail as the source
        # doesn't exist.
        req = FinalizeImageRequest(image_id=img_id,
                                   datastore=ds.id,
                                   tmp_image_path=tmp_image_path)
        response = self.host_client.finalize_image(req)
        self.assertEqual(response.result,
                         FinalizeImageResultCode.IMAGE_NOT_FOUND)

        # Verify that we fail if the destination already exists.
        tmp_image, ds = self._create_test_image(tmp_img_id)
        req = FinalizeImageRequest(image_id=img_id,
                                   datastore=ds.id,
                                   tmp_image_path=tmp_image_path)
        response = self.host_client.finalize_image(req)
        self.assertEqual(response.result,
                         FinalizeImageResultCode.DESTINATION_ALREADY_EXIST)

        # cleanup
        self._delete_image(dst_image)

    def test_start_image_scanner(self):
        """
        Test image scanner. Make sure the idle images are reported correctly.
        """
        datastore = self._find_configured_datastore_in_host_config()

        image_id_1 = new_id()
        dst_image_1, _ = self._create_test_image(image_id_1)

        image_id_2 = new_id()
        dst_image_2, _ = self._create_test_image(image_id_2)
        disk_image_2 = DiskImage(image_id_2, CloneType.COPY_ON_WRITE)

        disks = [
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True,
                 image=disk_image_2,
                 capacity_gb=0, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=2, flavor_info=self.DEFAULT_DISK_FLAVOR)
        ]
        vm_wrapper = VmWrapper(self.host_client)
        reservation = vm_wrapper.place_and_reserve(vm_disks=disks).reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_wrapper.create(request=request)
        logger.info("Image scan, Vm id: %s" % vm_wrapper.id)

        # Start image scanner
        start_scan_request = StartImageScanRequest()
        start_scan_request.datastore_id = datastore.id
        start_scan_request.scan_rate = 600
        start_scan_request.timeout = 30
        start_scan_response = self.host_client.start_image_scan(start_scan_request)
        self.assertEqual(start_scan_response.result, StartImageOperationResultCode.OK)
        self._get_and_check_inactive_images(datastore.id, image_id_1, True)
        get_inactive_images_response = self._get_and_check_inactive_images(datastore.id, image_id_2, False)

        # Start image sweeper
        start_sweep_request = StartImageSweepRequest()
        start_sweep_request.datastore_id = datastore.id
        start_sweep_request.image_descs = get_inactive_images_response.image_descs
        start_sweep_request.sweep_rate = 600
        start_sweep_request.timeout = 30
        start_sweep_request.grace_period = 0
        start_sweep_response = self.host_client.start_image_sweep(start_sweep_request)
        self.assertEqual(start_sweep_response.result, StartImageOperationResultCode.OK)

        self._get_and_check_deleted_images(datastore.id, image_id_1, True)
        # cleanup
        vm_wrapper.delete()
        self._delete_image(dst_image_1, DeleteDirectoryResultCode.DIRECTORY_NOT_FOUND)
        self._delete_image(dst_image_2)

    def _get_and_check_inactive_images(self, datastore_id, image_id, found):
        get_inactive_images_request = GetInactiveImagesRequest()
        get_inactive_images_request.datastore_id = datastore_id
        for counter in range(1, 30):
            time.sleep(1)
            get_inactive_images_response = self.host_client.get_inactive_images(get_inactive_images_request)
            if get_inactive_images_response.result == GetMonitoredImagesResultCode.OK:
                break

        self.assertEqual(get_inactive_images_response.result, GetMonitoredImagesResultCode.OK)
        image_descriptors = get_inactive_images_response.image_descs
        image_found = False
        logger.info("Image Descriptors: %s" % image_descriptors)
        logger.info("Target Image Id: %s" % image_id)
        for image_descriptor in image_descriptors:
            if image_descriptor.image_id == image_id:
                image_found = True

        self.assertEqual(image_found, found)
        return get_inactive_images_response

    def _get_and_check_deleted_images(self, datastore_id, image_id, found):
        get_deleted_images_request = GetInactiveImagesRequest()
        get_deleted_images_request.datastore_id = datastore_id
        for counter in range(1, 30):
            time.sleep(1)
            get_inactive_deleted_response = self.host_client.get_deleted_images(get_deleted_images_request)
            if get_inactive_deleted_response.result == GetMonitoredImagesResultCode.OK:
                break

        self.assertEqual(get_inactive_deleted_response.result, GetMonitoredImagesResultCode.OK)
        image_descriptors = get_inactive_deleted_response.image_descs
        image_found = False
        logger.info("Image Descriptors: %s" % image_descriptors)
        logger.info("Target Image Id: %s" % image_id)
        for image_descriptor in image_descriptors:
            if image_descriptor.image_id == image_id:
                image_found = True

        self.assertEqual(image_found, found)
        return get_inactive_deleted_response

    def _type_to_tag(self, ds_type):
        type_to_tag = {
            DatastoreType.NFS_3: NFS_TAG,
            DatastoreType.NFS_41: NFS_TAG,
            DatastoreType.SHARED_VMFS: SHARED_VMFS_TAG,
            DatastoreType.LOCAL_VMFS: LOCAL_VMFS_TAG,
        }
        return type_to_tag.get(ds_type)

    def _other_tags(self, tag):
        tags = [NFS_TAG, SHARED_VMFS_TAG, LOCAL_VMFS_TAG]
        tags.remove(tag)
        return tags

    def _create_constraints(self, datastores, tags):
        constraints = []
        for datastore in datastores:
            constraints.append(ResourceConstraint(
                type=ResourceConstraintType.DATASTORE,
                values=[datastore]))
        for tag in tags:
            constraints.append(ResourceConstraint(
                type=ResourceConstraintType.DATASTORE_TAG,
                values=[tag]))
        return constraints

    def test_create_image_from_vm(self):
        """ Integration test for creating an image from a VM """
        img_id = "test-new-im-from-vm-%s" % new_id()
        tmp_img_id = "-tmp-" + img_id
        tmp_image, ds = self._create_test_image(tmp_img_id)

        tmp_image_path = datastore_path(ds.id, "image_" + tmp_img_id)
        src_vmdk = vmdk_path(ds.id, tmp_img_id, IMAGE_FOLDER_NAME_PREFIX)
        vm_wrapper = VmWrapper(self.host_client)

        try:
            self._manage_disk(
                vim.VirtualDiskManager.DeleteVirtualDisk_Task,
                name=src_vmdk)
        except Exception:
            logger.error(
                "Error deleting vmdk when setting up tmp image %s" % src_vmdk,
                exc_info=True)
            raise

        dst_image = Image(img_id, ds)

        image = DiskImage("ttylinux", CloneType.COPY_ON_WRITE)
        disks = [
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True,
                 image=image,
                 capacity_gb=0, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=1, flavor_info=self.DEFAULT_DISK_FLAVOR),
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, True, True,
                 capacity_gb=2, flavor_info=self.DEFAULT_DISK_FLAVOR)
        ]
        reservation = vm_wrapper.place_and_reserve(vm_disks=disks).reservation
        request = vm_wrapper.create_request(res_id=reservation)
        vm_wrapper.create(request=request)

        # VM in wrong state
        vm_wrapper.power(Host.PowerVmOp.ON, Host.PowerVmOpResultCode.OK)
        time.sleep(10)
        vm_wrapper.create_image_from_vm(
            image_id=img_id,
            datastore=ds.id,
            tmp_image_path=tmp_image_path,
            expect=Host.CreateImageFromVmResultCode.INVALID_VM_POWER_STATE)

        vm_wrapper.power(Host.PowerVmOp.OFF, Host.PowerVmOpResultCode.OK)
        time.sleep(10)

        # Happy case
        vm_wrapper.create_image_from_vm(
            image_id=img_id,
            datastore=ds.id,
            tmp_image_path=tmp_image_path,
            expect=Host.CreateImageFromVmResultCode.OK)

        request = Host.GetImagesRequest(ds.id)
        response = self.host_client.get_images(request)
        assert_that(response.result, is_(GetImagesResultCode.OK))
        assert_that(response.image_ids, has_item(img_id))

        # Issue another finalize call and it should fail as the source
        # doesn't exist.
        req = FinalizeImageRequest(image_id=img_id,
                                   datastore=ds.id,
                                   tmp_image_path=tmp_image_path)
        response = self.host_client.finalize_image(req)
        self.assertEqual(response.result,
                         FinalizeImageResultCode.IMAGE_NOT_FOUND)

        # Verify that we fail if the destination already exists.
        tmp_image, ds = self._create_test_image(tmp_img_id)
        vm_wrapper.create_image_from_vm(
            image_id=tmp_img_id,
            datastore=ds.id,
            tmp_image_path=tmp_image_path,
            expect=Host.CreateImageFromVmResultCode.IMAGE_ALREADY_EXIST)

        vm_wrapper.delete()

        # VM to create image from is gone.
        vm_wrapper.create_image_from_vm(
            image_id=img_id,
            datastore=ds.id,
            tmp_image_path=tmp_image_path,
            expect=Host.CreateImageFromVmResultCode.VM_NOT_FOUND)

        # Create a VM using the new image created
        vm_wrapper2 = VmWrapper(self.host_client)
        image = DiskImage(img_id, CloneType.COPY_ON_WRITE)
        disks = [
            Disk(new_id(), self.DEFAULT_DISK_FLAVOR.name, False, True,
                 image=image,
                 capacity_gb=0, flavor_info=self.DEFAULT_DISK_FLAVOR),
        ]
        reservation = vm_wrapper2.place_and_reserve(vm_disks=disks).reservation
        request = vm_wrapper2.create_request(res_id=reservation)
        vm_wrapper2.create(request=request)
        vm_wrapper2.power(Host.PowerVmOp.ON, Host.PowerVmOpResultCode.OK)
        vm_wrapper2.power(Host.PowerVmOp.OFF, Host.PowerVmOpResultCode.OK)
        vm_wrapper2.delete()

        # cleanup
        self._delete_image(dst_image)

    def test_delete_tmp_image(self):
        """ Integration test for deleting temp image directory """
        img_id = "test-delete-tmp-image"
        tmp_image, ds = self._create_test_image(img_id)
        tmp_image_path = "image_" + img_id
        req = DeleteDirectoryRequest(datastore=ds.id,
                                     directory_path=tmp_image_path)
        res = self.host_client.delete_directory(req)
        self.assertEqual(res.result, DeleteDirectoryResultCode.OK)

        req = DeleteDirectoryRequest(datastore=ds.id,
                                     directory_path=tmp_image_path)
        res = self.host_client.delete_directory(req)
        self.assertEqual(res.result,
                         DeleteDirectoryResultCode.DIRECTORY_NOT_FOUND)

        req = DeleteDirectoryRequest(datastore="foo_bar",
                                     directory_path=tmp_image_path)
        res = self.host_client.delete_directory(req)
        self.assertEqual(res.result,
                         DeleteDirectoryResultCode.DATASTORE_NOT_FOUND)

    def _get_agent_id(self):
        host_config_request = Host.GetConfigRequest()
        res = self.host_client.get_host_config(host_config_request)
        return res.hostConfig.agent_id
Code Example #4
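Code Example #4 tests HTTP file transfer against the ESX host's /folder endpoint: it uploads and downloads a random file using CGI tickets, checks the failure paths for missing files and datastores, and verifies that an image streamed from a shadow VM is stream-optimized. It reuses the host_remote_test config section shown for Code Example #2, plus the optional agent_port (default 8835) and image_datastore (default datastore1) keys.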
class TestHttpTransfer(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]
        self.agent_port = config["host_remote_test"].get("agent_port", 8835)
        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.image_datastore = config["host_remote_test"].get("image_datastore", "datastore1")

        self._logger = logging.getLogger(__name__)
        self.vim_client = VimClient(self.host, "root", self.pwd)
        self.http_transferer = HttpNfcTransferer(self.vim_client, self.image_datastore, self.host)

        # Create a 100 KB file of random bytes to use as the transfer payload.
        with tempfile.NamedTemporaryFile(delete=False) as source_file:
            source_file.write(os.urandom(1024 * 100))
        self.random_file = source_file.name

        self.remote_files_to_delete = []

    def _cleanup_remote_files(self):
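        # Remove any datastore files this test uploaded; deletion is
        # best-effort, so failures are ignored.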
        file_manager = self.vim_client._content.fileManager
        for ds_path in self.remote_files_to_delete:
            try:
                delete_task = file_manager.DeleteFile(ds_path, None)
                self.vim_client.wait_for_task(delete_task)
            except:
                # best-effort cleanup; ignore files that are already gone
                pass

    def tearDown(self):
        os.unlink(self.random_file)
        self._cleanup_remote_files()
        self.vim_client.disconnect(wait=True)

    def _remote_ds_path(self, ds, relpath):
        return "[%s] %s" % (ds, relpath)

    def _datastore_path_url(self, datastore, relpath):
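        # "ha-datacenter" is the default datacenter name on a standalone ESX
        # host; the "-" is double-URL-encoded here ("%2d" -> "%252d"),
        # presumably because the value is URL-decoded twice before reaching
        # hostd.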
        quoted_dc_name = "ha%252ddatacenter"
        url = "https://%s/folder/%s?dcPath=%s&dsName=%s" % (self.host, relpath, quoted_dc_name, datastore)
        return url

    def test_download_missing_file(self):
        url = self._datastore_path_url(self.image_datastore, "_missing_file_.bin")
        ticket = self.http_transferer._get_cgi_ticket(self.host, self.agent_port, url, http_op=HttpOp.GET)
        with tempfile.NamedTemporaryFile(delete=True) as local_file:
            self.assertRaises(
                TransferException, self.http_transferer.download_file, url, local_file.name, ticket=ticket
            )

    def test_upload_file_bad_destination(self):
        url = self._datastore_path_url("_missing__datastore_", "random.bin")
        ticket = self.http_transferer._get_cgi_ticket(self.host, self.agent_port, url, http_op=HttpOp.PUT)
        self.assertRaises(TransferException, self.http_transferer.upload_file, self.random_file, url, ticket=ticket)

    def test_raw_file_transfer_roundtrip(self):
        relpath = "_test_http_xfer_random.bin"
        url = self._datastore_path_url(self.image_datastore, relpath)
        ticket = self.http_transferer._get_cgi_ticket(self.host, self.agent_port, url, http_op=HttpOp.PUT)
        self.http_transferer.upload_file(self.random_file, url, ticket=ticket)

        self.remote_files_to_delete.append(self._remote_ds_path(self.image_datastore, relpath))

        ticket = self.http_transferer._get_cgi_ticket(self.host, self.agent_port, url, http_op=HttpOp.GET)
        with tempfile.NamedTemporaryFile(delete=True) as downloaded_file:
            self.http_transferer.download_file(url, downloaded_file.name, ticket=ticket)
            # check that a file uploaded and immediately downloaded back
            # is identical to the source file used
            assert_that(filecmp.cmp(self.random_file, downloaded_file.name, shallow=False), is_(True))

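    # os.path.exists is patched to always return True, presumably so that
    # local image/path existence checks inside the transferer pass during
    # this test.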
    @patch("os.path.exists", return_value=True)
    def test_get_streamoptimized_image_stream(self, _exists):
        image_id = "ttylinux"
        lease, url = self.http_transferer._get_image_stream_from_shadow_vm(image_id)
        try:
            with tempfile.NamedTemporaryFile(delete=True) as downloaded_file:
                # see if we can download without errors
                self.http_transferer.download_file(url, downloaded_file.name)
                # check that the first part of the file looks like that from a
                # stream-optimized disk
                with open(downloaded_file.name, "rb") as f:
                    data = f.read(65536)
                    assert_that(len(data), is_(65536))
                    regex = re.compile("streamOptimized", re.IGNORECASE | re.MULTILINE)
                    matches = regex.findall(data)
                    assert_that(matches, is_not(empty()))
        finally:
            lease.Complete()

    def test_send_image_to_host(self):
        image_id = "ttylinux"
        tmp_vmdk_path = "/tmp/test_send_image_%s.vmdk" % str(uuid.uuid4())
        self.http_transferer.send_image_to_host(
            image_id, self.image_datastore, self.host, self.agent_port, intermediate_file_path=tmp_vmdk_path
        )

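        # The transfer appears to leave a VM named after the image registered
        # on the target host; destroy it so repeated runs start clean.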
        vim_vm = self.vim_client.get_vm(image_id)
        vim_task = vim_vm.Destroy()
        self.vim_client.wait_for_task(vim_task)
Code Example #5
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(self.host, "root", self.pwd,
                                    auto_sync=True)
        self.vm_config = EsxVmConfig(self.vim_client)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect(wait=True)

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory > 0, is_(True))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory > 0, is_(True))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus > 0, is_(True))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "%s-%s-%s" % (
            time.strftime("%Y-%m-%d-%H%M%S", time.localtime()),
            str(random.randint(100000, 1000000)),
            suffix)

        datastore = self.vim_client.get_datastore().name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        folder = self.vim_client.vm_folder
        resource_pool = self.vim_client.root_resource_pool
        task = folder.CreateVm(create_spec, resource_pool, None)
        self.vim_client.wait_for_task(task)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOff))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException,
                          self.vim_client.get_vm_in_cache, "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec)
        self.vim_client.wait_for_task(task)

        # For the ReconfigVM task that removes a disk, hostd may mark the
        # task successful before it updates the VM status. So when
        # wait_for_task returns, vm_cache may still hold the old state,
        # though it eventually converges. AFAIK this only happens for this
        # task, and it is acceptable here because other operations rarely
        # depend on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec)
        self.vim_client.wait_for_task(task)

        # Same as before when disk is added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1), "disk2 in " +
                                                     str(found_vms[0].disks))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until the new power state shows up in the cache
        self._wait_vm_power_status(vm_id, PowerState.poweredOn)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(PowerState.poweredOn))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """

        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

            def networks_updated(self):
                pass

            def virtual_machines_updated(self):
                pass

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

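        # Wrap the client's datastore-update handler so we can observe how
        # often hostd pushes datastore property updates during create/delete.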
        mock_apply = MagicMock(wraps=self.vim_client._apply_ds_update)
        self.vim_client._apply_ds_update = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional "datastores updated" notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = vim.vm.ConfigSpec(
            name=vm_id,
            guestId="otherGuest",
            memoryMB=64,
            numCPUs=2,
            files=vim.vm.FileInfo(vmPathName="[%s] /" % datastore),
            deviceChange=[],
        )
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        self.vm_config.add_device(create_spec, controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(create_spec, disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = vim.vm.ConfigSpec()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        controller = \
            self.vm_config._find_scsi_controller(update_spec,
                                                 vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        self.vm_config.create_device(update_spec, disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = vim.vm.ConfigSpec()
        devices = self.vm_config.get_devices_from_config(vm_info.config)
        found_device = None
        for device in devices:
            if isinstance(device, vim.vm.device.VirtualDisk) and \
                    device.backing.fileName.endswith(disk_path):
                found_device = device
        self.vm_config.remove_device(remove_spec, found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.acquire_clone_ticket()
        vim_client2 = VimClient(host=self.host, ticket=ticket)
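        # Accessing host_system exercises the ticket-authenticated session;
        # this should fail if the clone ticket is invalid.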
        vim_client2.host_system

    def test_http_ticket(self):
        datastore = self.vim_client.get_datastore().name
        filename = "%s.bin" % str(uuid.uuid4())
        quoted_dc_name = 'ha%252ddatacenter'
        url = 'https://%s/folder/%s?dcPath=%s&dsName=%s' % (
            self.host, filename, quoted_dc_name, datastore)

        ticket = self.vim_client.acquire_cgi_ticket(url, HttpOp.PUT)
        assert_that(ticket, is_not(equal_to(None)))

    def test_host_stats(self):
        """ Skip host stats test.
        This test does not agree with the contract exposed from
        the implementation.
        Until the vim_client code be refactor/cleanup, disable this test for
        now.
        """
        raise SkipTest()

        self.vim_client.initialize_host_counters()
        self.vim_client.update_hosts_stats()
        stats = self.vim_client.get_host_stats()
        assert_that(stats, has_key('mem.consumed'))
        assert_that(stats['mem.consumed'], greater_than(0))
        assert_that(stats, has_key('rescpu.actav1'))
        assert_that(stats['rescpu.actav1'], greater_than(0))

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)
Code Example #6
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(auto_sync=True)
        self.vim_client.connect_userpwd(self.host, "root", self.pwd)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect()

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory > 0, is_(True))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory > 0, is_(True))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus > 0, is_(True))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "vm_%s-%s-%s" % (time.strftime("%Y-%m-%d-%H%M%S",
                                               time.localtime()),
                                 str(random.randint(100000, 1000000)), suffix)

        datastore = self.vim_client.get_all_datastores()[0].name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        self.vim_client.create_vm(vm_id, create_spec)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(VmPowerState.STOPPED))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException, self.vim_client.get_vm_in_cache,
                          "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec.get_spec())
        self.vim_client.wait_for_task(task)

        # For the ReconfigVM task that removes a disk, hostd may mark the
        # task successful before it updates the VM status. So when
        # wait_for_task returns, vm_cache may still hold the old state,
        # though it eventually converges. AFAIK this only happens for this
        # task, and it is acceptable here because other operations rarely
        # depend on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec.get_spec())
        self.vim_client.wait_for_task(task)

        # Same as before when disk is added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1),
                    "disk2 in " + str(found_vms[0].disks))
        assert_that(found_vms[0].disks, contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until the new power state shows up in the cache
        self._wait_vm_power_status(vm_id, VmPowerState.STARTED)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(VmPowerState.STARTED))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """
        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

        mock_apply = MagicMock(
            wraps=self.vim_client._vim_cache._update_ds_cache)
        self.vim_client._vim_cache._update_ds_cache = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional "datastores updated" notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = EsxVmConfigSpec(None)
        create_spec.init_for_create(vm_id, datastore, 64, 2)
        create_spec._cfg_spec.files = vim.vm.FileInfo(vmPathName="[%s] /" %
                                                      datastore)
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        create_spec._add_device(controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        create_spec._create_device(disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = EsxVmConfigSpec(None)
        update_spec.init_for_update()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent)
        controller = update_spec._find_scsi_controller(vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        update_spec._create_device(disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = EsxVmConfigSpec(None)
        remove_spec.init_for_update()
        devices = remove_spec._get_devices_by_type(vm_info.config,
                                                   vim.vm.device.VirtualDisk)
        found_device = None
        for device in devices:
            if device.backing.fileName.endswith(disk_path):
                found_device = device
        remove_spec._remove_device(found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.get_vim_ticket()
        vim_client2 = VimClient()
        vim_client2.connect_ticket(self.host, ticket)
        vim_client2.host_system()

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)
Code Example #7
class TestVimClient(unittest.TestCase):
    def setUp(self):
        if "host_remote_test" not in config:
            raise SkipTest()

        self.host = config["host_remote_test"]["server"]
        self.pwd = config["host_remote_test"]["esx_pwd"]

        if self.host is None or self.pwd is None:
            raise SkipTest()

        self.vim_client = VimClient(auto_sync=True)
        self.vim_client.connect_userpwd(self.host, "root", self.pwd)
        self._logger = logging.getLogger(__name__)

    def tearDown(self):
        self.vim_client.disconnect()

    def test_memory_usage(self):
        used_memory = self.vim_client.memory_usage_mb
        assert_that(used_memory > 0, is_(True))

    def test_total_memory(self):
        total_memory = self.vim_client.total_vmusable_memory_mb
        assert_that(total_memory > 0, is_(True))

    def test_total_cpus(self):
        num_cpus = self.vim_client.num_physical_cpus
        assert_that(num_cpus > 0, is_(True))

    def _create_test_vm(self, suffix="host-integ"):
        # Create VM
        vm_id = "vm_%s-%s-%s" % (
            time.strftime("%Y-%m-%d-%H%M%S", time.localtime()),
            str(random.randint(100000, 1000000)),
            suffix)

        datastore = self.vim_client.get_all_datastores()[0].name
        disk_path = "[%s] %s/disk.vmdk" % (datastore, vm_id)
        create_spec = self.get_create_spec(datastore, vm_id, disk_path)
        self.vim_client.create_vm(vm_id, create_spec)
        vm = self.vim_client.get_vm(vm_id)
        return (vm_id, vm, datastore, disk_path)

    def test_get_cached_vm(self):
        vm_id, vm, datastore, disk_path = self._create_test_vm("vm-cache-test")

        # Verify VM is in cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].power_state, is_(VmPowerState.STOPPED))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Make sure get_vm_in_cache works
        vm_from_cache = self.vim_client.get_vm_in_cache(vm_id)
        assert_that(vm_from_cache.name, is_(vm_id))
        self.assertRaises(VmNotFoundException,
                          self.vim_client.get_vm_in_cache, "missing")

        # Add disk
        disk2_path = "[%s] %s/disk2.vmdk" % (datastore, vm_id)
        update_spec = self.get_update_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(update_spec.get_spec())
        self.vim_client.wait_for_task(task)

        # For the ReconfigVM task that removes a disk, hostd may mark the
        # task successful before it updates the VM status. So when
        # wait_for_task returns, vm_cache may still hold the old state,
        # though it eventually converges. AFAIK this only happens for this
        # task, and it is acceptable here because other operations rarely
        # depend on it.
        self._wait_vm_has_disk(vm_id, 2)

        # Verify disk added
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms[0].disks), is_(2))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path, disk2_path))

        # Remove disk
        vm = self.vim_client.get_vm(vm_id)
        remove_spec = self.get_remove_spec(vm, disk2_path)
        task = vm.ReconfigVM_Task(remove_spec.get_spec())
        self.vim_client.wait_for_task(task)

        # Same as before when disk is added
        self._wait_vm_has_disk(vm_id, 1)

        # Verify disk removed
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(len(found_vms[0].disks), is_(1), "disk2 in " +
                                                     str(found_vms[0].disks))
        assert_that(found_vms[0].disks,
                    contains_inanyorder(disk_path))

        # Power on vm
        task = vm.PowerOn()
        self.vim_client.wait_for_task(task)

        # Wait until the new power state shows up in the cache
        self._wait_vm_power_status(vm_id, VmPowerState.STARTED)

        # Verify VM state in cache is updated
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(1))
        assert_that(found_vms[0].power_state, is_(VmPowerState.STARTED))
        assert_that(found_vms[0].name, is_(vm_id))
        assert_that(found_vms[0].memory_mb, is_(64))
        assert_that(found_vms[0].path, starts_with("[%s]" % datastore))
        assert_that(len(found_vms[0].disks), is_(1))
        assert_that(found_vms[0].disks[0], is_(disk_path))

        # Destroy VM
        task = vm.PowerOff()
        self.vim_client.wait_for_task(task)
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # Verify VM is deleted from cache
        vms = self.vim_client.get_vms_in_cache()
        found_vms = [v for v in vms if v.name == vm_id]
        assert_that(len(found_vms), is_(0))

    def test_no_datastore_update(self):
        """ Test datastore update is no longer triggered on VM creates/deletes
        """

        class UpdateListener(object):
            def __init__(self):
                self._ds_update_count = 0

            def datastores_updated(self):
                self._ds_update_count += 1

        listener = UpdateListener()
        self.vim_client.add_update_listener(listener)
        # listener always gets updated once on add
        assert_that(listener._ds_update_count, is_(1))

        mock_apply = MagicMock(wraps=self.vim_client._vim_cache._update_ds_cache)
        self.vim_client._vim_cache._update_ds_cache = mock_apply

        _, vm, _, _ = self._create_test_vm("ds-update-test")
        task = vm.Destroy()
        self.vim_client.wait_for_task(task)

        # expect to get a datastore property update (unfortunately) ...
        for _ in xrange(50):
            if mock_apply.call_count > 0:
                break
            time.sleep(0.1)
        # ... but verify that no additional "datastores updated" notifications
        # are sent out as a result
        assert_that(listener._ds_update_count, is_(1))

    def get_create_spec(self, datastore, vm_id, disk_path):
        create_spec = EsxVmConfigSpec(None)
        create_spec.init_for_create(vm_id, datastore, 64, 2)
        create_spec._cfg_spec.files = vim.vm.FileInfo(vmPathName="[%s] /" % datastore)
        controller = vim.vm.device.VirtualLsiLogicController(
            key=1,
            sharedBus=vim.vm.device.VirtualSCSIController.Sharing.noSharing,
            busNumber=2,
            unitNumber=-1)
        create_spec._add_device(controller)
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        disk = vim.vm.device.VirtualDisk(
            controllerKey=1,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        create_spec._create_device(disk)
        return create_spec

    def get_update_spec(self, vm_info, disk_path):
        update_spec = EsxVmConfigSpec(None)
        update_spec.init_for_update()
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo(
            fileName=disk_path,
            diskMode=vim.vm.device.VirtualDiskOption.DiskMode.persistent
        )
        controller = update_spec._find_scsi_controller(vm_info.config)
        disk = vim.vm.device.VirtualDisk(
            controllerKey=controller.key,
            key=-1,
            unitNumber=-1,
            backing=backing,
            capacityInKB=1024,
        )
        update_spec._create_device(disk)
        return update_spec

    def get_remove_spec(self, vm_info, disk_path):
        remove_spec = EsxVmConfigSpec(None)
        remove_spec.init_for_update()
        devices = remove_spec._get_devices_by_type(vm_info.config, vim.vm.device.VirtualDisk)
        found_device = None
        for device in devices:
            if device.backing.fileName.endswith(disk_path):
                found_device = device
        remove_spec._remove_device(found_device)
        return remove_spec

    def test_clone_ticket(self):
        ticket = self.vim_client.get_vim_ticket()
        vim_client2 = VimClient()
        vim_client2.connect_ticket(self.host, ticket)
        vim_client2.host_system()

    def _wait_vm_has_disk(self, vm_id, disk_num):
        """Wait until the vm has disk number of the vm becomes disk_num
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if len(vm_in_cache.disks) == disk_num:
                self._logger.info("VmCache disk number synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)

    def _wait_vm_power_status(self, vm_id, power_state):
        """Wait until the vm has power_state
        """
        now = time.time()
        for _ in xrange(50):
            vm_in_cache = self.vim_client.get_vm_in_cache(vm_id)
            if vm_in_cache.power_state == power_state:
                self._logger.info("VmCache power_state synced in %.2f second" %
                                  (time.time() - now))
                break
            time.sleep(0.1)