Code example #1
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(
            host=host, ticket=response.vim_ticket, auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize NFC lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases use '*' as a placeholder
        # for the host name, which has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow VM for use by this host if not already present.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        vm_id = self._shadow_vm_id
        if self._vm_manager.has_vm(vm_id):
            self._logger.debug("shadow vm exists")
            return

        spec = self._vm_config.create_spec(
            vm_id=vm_id, datastore=self._get_shadow_vm_datastore(),
            memory=32, cpus=1)
        try:
            self._vm_manager.create_vm(vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" % vm_id)
            raise

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(self._shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.add_disk(spec, image_datastore, image_id, info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(self._shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore):
        """ Obtains a handle to the streamOptimized disk of the shadow VM.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """

        self._ensure_shadow_vm()
        self._configure_shadow_vm_with_disk(image_id, image_datastore)
        lease, disk_url = self._export_shadow_vm()
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _create_import_vm_spec(self, image_id, datastore):
        vm_name = "h2h_%s" % str(uuid.uuid4())
        spec = self._vm_config.create_spec_for_import(vm_id=vm_name,
                                                      image_id=image_id,
                                                      datastore=datastore,
                                                      memory=32,
                                                      cpus=1)

        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec, datastore, None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client,
                                         image_id, destination_datastore,
                                         imported_vm_name, metadata, manifest):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(
            image_id=image_id,
            datastore_id=destination_datastore,
            transferred_image_id=imported_vm_name,
            metadata=metadata,
            manifest=manifest,
        )

        response = agent_client.receive_image(request)
        if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST:
            raise DiskAlreadyExistException(response.error)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    def _read_metadata(self, image_datastore, image_id):
        try:
            # Read the raw manifest to be transferred
            manifest_path = os_image_manifest_path(image_datastore, image_id)
            with open(manifest_path) as f:
                manifest = f.read()

            # Read the raw metadata, if present, to be transferred
            metadata_path = os_metadata_path(image_datastore, image_id,
                                             IMAGE_FOLDER_NAME)
            metadata = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = f.read()

            return manifest, metadata
        except Exception:
            self._logger.exception("Failed to read metadata")
            raise

    @lock_non_blocking
    def send_image_to_host(self, image_id, image_datastore,
                           destination_image_id, destination_datastore,
                           host, port, intermediate_file_path=None):
        manifest, metadata = self._read_metadata(image_datastore, image_id)

        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore)

        # Save stream-optimized disk to a unique path locally for now.
        # TODO(vui): Switch to chunked transfers to handle not knowing content
        # length in the full streaming mode.

        if intermediate_file_path:
            tmp_path = intermediate_file_path
        else:
            tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
                self._get_shadow_vm_datastore(),
                self._shadow_vm_id)
        try:
            self.download_file(disk_url, tmp_path)
        finally:
            read_lease.Complete()

        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(
            destination_image_id, destination_datastore)

        agent_client, vim_client = self._get_remote_connections(host, port)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(vim_client,
                                                                 spec)
            try:
                self.upload_file(tmp_path, disk_url)
            finally:
                write_lease.Complete()
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

            # TODO(vui): imported vm name should be made unique to remove
            # ambiguity during subsequent lookup
            imported_vm_name = destination_image_id

            self._register_imported_image_at_host(
                agent_client, destination_image_id, destination_datastore,
                imported_vm_name, metadata, manifest)

        finally:
            agent_client.close()
            vim_client.disconnect()

        return imported_vm_name
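
A minimal usage sketch for the class above. The local VimClient, datastore
names, destination host, and agent port are illustrative assumptions, not
values taken from the original code.

# Usage sketch (hypothetical values). `local_vim_client` is assumed to be an
# already-authenticated VimClient connected to this ESX host.
transferer = HttpNfcTransferer(local_vim_client,
                               image_datastores=["image_ds"],
                               host_name="10.0.0.1")
imported_vm_name = transferer.send_image_to_host(
    image_id="ttylinux",
    image_datastore="image_ds",
    destination_image_id=None,          # None -> reuse image_id
    destination_datastore="remote_ds",
    host="10.0.0.2",                    # destination agent host (assumed)
    port=8835)                          # destination agent port (assumed)
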
Code example #2
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _create_remote_vim_client(self, agent_client, host):
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host,
                               ticket=response.vim_ticket,
                               auto_sync=False)
        return vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize NFC lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases use '*' as a placeholder
        # for the host name, which has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self, shadow_vm_id):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _create_shadow_vm(self):
        """ Creates a shadow vm specifically for use by this host.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        shadow_vm_id = SHADOW_VM_NAME_PREFIX + str(uuid.uuid4())
        spec = self._vm_config.create_spec(
            vm_id=shadow_vm_id,
            datastore=self._get_shadow_vm_datastore(),
            memory=32,
            cpus=1)
        try:
            self._vm_manager.create_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" %
                                   shadow_vm_id)
            raise
        return shadow_vm_id

    def _delete_shadow_vm(self, shadow_vm_id):
        try:
            # Detach the disks so they are not deleted along with the VM
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.update_vm(shadow_vm_id, spec)

            # delete the vm
            self._vm_manager.delete_vm(shadow_vm_id, force=True)
        except Exception:
            self._logger.exception("Error deleting vm with id %s" %
                                   shadow_vm_id)

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore,
                                       shadow_vm_id):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(shadow_vm_id)
            self._vm_manager.add_disk(spec,
                                      image_datastore,
                                      image_id,
                                      info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore,
                                         shadow_vm_id):
        """ Obtains a handle to the streamOptimized disk of the shadow VM.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """
        self._configure_shadow_vm_with_disk(image_id, image_datastore,
                                            shadow_vm_id)
        lease, disk_url = self._export_shadow_vm(shadow_vm_id)
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _prepare_receive_image(self, agent_client, image_id, datastore):
        request = PrepareReceiveImageRequest(image_id, datastore)
        response = agent_client.prepare_receive_image(request)
        if response.result != PrepareReceiveImageResultCode.OK:
            err_msg = "Failed to prepare receive image. Response = %s" % str(
                response)
            self._logger.info(err_msg)
            raise ValueError(err_msg)
        return response.import_vm_path, response.import_vm_id

    def _create_import_vm_spec(self, vm_id, datastore, vm_path):
        spec = EsxVmConfigSpec(vm_id, "otherGuest", 32, 1, vm_path, None)
        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec,
                                                  datastore,
                                                  None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client, image_id,
                                         destination_datastore,
                                         imported_vm_name, metadata):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(image_id=image_id,
                                      datastore_id=destination_datastore,
                                      transferred_image_id=imported_vm_name,
                                      metadata=metadata)

        response = agent_client.receive_image(request)
        if response.result == ReceiveImageResultCode.DESTINATION_ALREADY_EXIST:
            raise DiskAlreadyExistException(response.error)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    def _read_metadata(self, image_datastore, image_id):
        try:
            # Read the raw metadata, if present, to be transferred
            metadata_path = os_metadata_path(image_datastore, image_id,
                                             IMAGE_FOLDER_NAME_PREFIX)
            metadata = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = f.read()

            return metadata
        except Exception:
            self._logger.exception("Failed to read metadata")
            raise

    def _send_image(self, agent_client, host, tmp_path, spec):
        vim_client = self._create_remote_vim_client(agent_client, host)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(
                vim_client, spec)
            try:
                self.upload_file(tmp_path, disk_url, write_lease)
            finally:
                write_lease.Complete()
        finally:
            vim_client.disconnect()

    @lock_non_blocking
    def send_image_to_host(self, image_id, image_datastore,
                           destination_image_id, destination_datastore, host,
                           port):
        if destination_image_id is None:
            destination_image_id = image_id
        metadata = self._read_metadata(image_datastore, image_id)

        shadow_vm_id = self._create_shadow_vm()

        # Place transfer.vmdk under shadow_vm_path to work around VSAN's
        # restriction on files at the datastore top level.
        shadow_vm_path = os_datastore_path(
            self._get_shadow_vm_datastore(),
            compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
        transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
        self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)

        agent_client = None
        try:
            read_lease, disk_url = self._get_image_stream_from_shadow_vm(
                image_id, image_datastore, shadow_vm_id)

            try:
                self.download_file(disk_url, transfer_vmdk_path, read_lease)
            finally:
                read_lease.Complete()

            agent_client = DirectClient("Host", Host.Client, host, port)
            agent_client.connect()

            vm_path, vm_id = self._prepare_receive_image(
                agent_client, destination_image_id, destination_datastore)
            spec = self._create_import_vm_spec(vm_id, destination_datastore,
                                               vm_path)

            self._send_image(agent_client, host, transfer_vmdk_path, spec)
            self._register_imported_image_at_host(agent_client,
                                                  destination_image_id,
                                                  destination_datastore, vm_id,
                                                  metadata)

            return vm_id
        finally:
            try:
                os.unlink(transfer_vmdk_path)
            except OSError:
                pass
            self._delete_shadow_vm(shadow_vm_id)
            rm_rf(shadow_vm_path)
            if agent_client:
                agent_client.close()
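
The device URLs handed out by the export/import leases above contain '*' as
a host placeholder, which _ensure_host_in_url rewrites. Below is a standalone
sketch of that substitution; the lease URL is made up for illustration.

# Sketch of the '*' placeholder substitution done by _ensure_host_in_url.
# The lease URL is a fabricated example of what ExportVm/ImportVApp return.
lease_url = "https://*/nfc/52b0c7a1-disk-0.vmdk"
actual_host = "10.0.0.2"
protocol, rest = lease_url.split("://", 1)
host, selector = rest.split("/", 1)
fixed_url = "%s://%s/%s" % (
    protocol, host.replace("*", actual_host), selector)
# fixed_url == "https://10.0.0.2/nfc/52b0c7a1-disk-0.vmdk"
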
Code example #3
class HttpNfcTransferer(HttpTransferer):
    """ Class for handling HTTP-based disk transfers between ESX hosts.

    This class employs the ImportVApp and ExportVM APIs to transfer
    VMDKs efficiently to another host. A shadow VM is created and used in the
    initial export of the VMDK into the stream optimized format needed by
    ImportVApp.

    """

    LEASE_INITIALIZATION_WAIT_SECS = 10

    def __init__(self, vim_client, image_datastores, host_name="localhost"):
        super(HttpNfcTransferer, self).__init__(vim_client)
        self.lock = threading.Lock()
        self._shadow_vm_id = "shadow_%s" % self._vim_client.host_uuid
        self._lease_url_host_name = host_name
        self._image_datastores = image_datastores
        self._vm_config = EsxVmConfig(self._vim_client)
        self._vm_manager = EsxVmManager(self._vim_client, None)

    def _get_remote_connections(self, host, port):
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(host=host,
                               ticket=response.vim_ticket,
                               auto_sync=False)
        return agent_client, vim_client

    def _get_disk_url_from_lease(self, lease):
        for dev_url in lease.info.deviceUrl:
            self._logger.debug("%s -> %s" % (dev_url.key, dev_url.url))
            return dev_url.url

    def _wait_for_lease(self, lease):
        retries = HttpNfcTransferer.LEASE_INITIALIZATION_WAIT_SECS
        state = None
        while retries > 0:
            state = lease.state
            if state != vim.HttpNfcLease.State.initializing:
                break
            retries -= 1
            time.sleep(1)

        if retries == 0:
            self._logger.debug("Nfc lease initialization timed out")
            raise NfcLeaseInitiatizationTimeout()
        if state == vim.HttpNfcLease.State.error:
            self._logger.debug("Failed to initialize NFC lease: %s" %
                               str(lease.error))
            raise NfcLeaseInitiatizationError()

    def _ensure_host_in_url(self, url, actual_host):

        # URLs from vApp export/import leases use '*' as a placeholder
        # for the host name, which has to be replaced with the actual
        # host on which the resource resides.
        protocol, host, selector = self._split_url(url)
        if host.find("*") != -1:
            host = host.replace("*", actual_host)
        return "%s://%s%s" % (protocol, host, selector)

    def _export_shadow_vm(self):
        """ Initiates the Export VM operation.

        The lease created as part of ExportVM contains, among other things,
        the url to the stream-optimized disk of the image currently associated
        with the VM being exported.
        """
        vm = self._vim_client.get_vm_obj_in_cache(self._shadow_vm_id)
        lease = vm.ExportVm()
        self._wait_for_lease(lease)
        return lease, self._get_disk_url_from_lease(lease)

    def _get_shadow_vm_datastore(self):
        # The datastore in which the shadow VM will be created.
        return self._image_datastores[0]

    def _ensure_shadow_vm(self):
        """ Creates a shadow VM for use by this host if not already present.

        The shadow VM created is used to facilitate host-to-host transfer
        of any image accessible on this host to another datastore not directly
        accessible from this host.
        """
        vm_id = self._shadow_vm_id
        if self._vm_manager.has_vm(vm_id):
            self._logger.debug("shadow vm exists")
            return

        spec = self._vm_config.create_spec(
            vm_id=vm_id,
            datastore=self._get_shadow_vm_datastore(),
            memory=32,
            cpus=1)
        try:
            self._vm_manager.create_vm(vm_id, spec)
        except Exception:
            self._logger.exception("Error creating vm with id %s" % vm_id)
            raise

    def _configure_shadow_vm_with_disk(self, image_id, image_datastore):
        """ Reconfigures the shadow vm to contain only one image disk. """
        try:
            spec = self._vm_manager.update_vm_spec()
            info = self._vm_manager.get_vm_config(self._shadow_vm_id)
            self._vm_manager.remove_all_disks(spec, info)
            self._vm_manager.add_disk(spec,
                                      image_datastore,
                                      image_id,
                                      info,
                                      disk_is_image=True)
            self._vm_manager.update_vm(self._shadow_vm_id, spec)
        except Exception:
            self._logger.exception(
                "Error configuring shadow vm with image %s" % image_id)
            raise

    def _get_image_stream_from_shadow_vm(self, image_id, image_datastore):
        """ Obtains a handle to the streamOptimized disk of the shadow VM.

        The stream-optimized disk is obtained via configuring a shadow
        VM with the image disk we are interested in and exporting the
        reconfigured shadow VM.

        """

        self._ensure_shadow_vm()
        self._configure_shadow_vm_with_disk(image_id, image_datastore)
        lease, disk_url = self._export_shadow_vm()
        disk_url = self._ensure_host_in_url(disk_url,
                                            self._lease_url_host_name)
        return lease, disk_url

    def _create_import_vm_spec(self, image_id, datastore):
        vm_name = "h2h_%s" % str(uuid.uuid4())
        spec = self._vm_config.create_spec_for_import(vm_id=vm_name,
                                                      image_id=image_id,
                                                      datastore=datastore,
                                                      memory=32,
                                                      cpus=1)

        # Just specify a tiny capacity in the spec for now; the eventual vm
        # disk will be based on what is uploaded via the http nfc url.
        spec = self._vm_manager.create_empty_disk(spec,
                                                  datastore,
                                                  None,
                                                  size_mb=1)

        import_spec = vim.vm.VmImportSpec(configSpec=spec)
        return import_spec

    def _get_url_from_import_vm(self, dst_vim_client, import_spec):
        vm_folder = dst_vim_client.vm_folder
        root_rp = dst_vim_client.root_resource_pool
        lease = root_rp.ImportVApp(import_spec, vm_folder)
        self._wait_for_lease(lease)
        disk_url = self._get_disk_url_from_lease(lease)
        disk_url = self._ensure_host_in_url(disk_url, dst_vim_client.host)
        return lease, disk_url

    def _register_imported_image_at_host(self, agent_client, image_id,
                                         destination_datastore,
                                         imported_vm_name):
        """ Installs an image at another host.

        Image data was transferred via ImportVApp to said host.
        """

        request = ReceiveImageRequest(image_id=image_id,
                                      datastore_id=destination_datastore,
                                      transferred_image_id=imported_vm_name)

        response = agent_client.receive_image(request)
        if response.result != ReceiveImageResultCode.OK:
            raise ReceiveImageException(response.result, response.error)

    @lock_non_blocking
    def send_image_to_host(self,
                           image_id,
                           image_datastore,
                           destination_image_id,
                           destination_datastore,
                           host,
                           port,
                           intermediate_file_path=None):
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore)

        # Save stream-optimized disk to a unique path locally for now.
        # TODO(vui): Switch to chunked transfers to handle not knowing content
        # length in the full streaming mode.

        if intermediate_file_path:
            tmp_path = intermediate_file_path
        else:
            tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
                self._get_shadow_vm_datastore(), self._shadow_vm_id)
        try:
            self.download_file(disk_url, tmp_path)
        finally:
            read_lease.Complete()

        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(destination_image_id,
                                           destination_datastore)

        agent_client, vim_client = self._get_remote_connections(host, port)
        try:
            write_lease, disk_url = self._get_url_from_import_vm(
                vim_client, spec)
            try:
                self.upload_file(tmp_path, disk_url)
            finally:
                write_lease.Complete()
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass

            # TODO(vui): imported vm name should be made unique to remove
            # ambiguity during subsequent lookup
            imported_vm_name = destination_image_id

            self._register_imported_image_at_host(agent_client,
                                                  destination_image_id,
                                                  destination_datastore,
                                                  imported_vm_name)

        finally:
            agent_client.close()
            vim_client.disconnect()

        return imported_vm_name
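
Code examples #1 and #3 keep one long-lived shadow VM per host
("shadow_<host_uuid>") and never remove it, while code example #2 creates and
deletes a shadow VM per transfer. Below is a hedged cleanup sketch for the
persistent shadow VM, reusing the detach-then-delete sequence from code
example #2; the `transferer` instance is assumed to be built as in the usage
sketch after code example #1.

# Cleanup sketch: remove the persistent shadow VM used by code examples #1
# and #3, mirroring _delete_shadow_vm from code example #2. `transferer` is
# an HttpNfcTransferer instance (see the usage sketch after code example #1).
shadow_vm_id = transferer._shadow_vm_id
if transferer._vm_manager.has_vm(shadow_vm_id):
    # Detach the image disk first so it is not deleted along with the VM.
    spec = transferer._vm_manager.update_vm_spec()
    info = transferer._vm_manager.get_vm_config(shadow_vm_id)
    transferer._vm_manager.remove_all_disks(spec, info)
    transferer._vm_manager.update_vm(shadow_vm_id, spec)
    transferer._vm_manager.delete_vm(shadow_vm_id, force=True)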