def connect(self, host, port, server_id=None, client_timeout=DEFAULT_CLIENT_TIMEOUT):
    """Yield an RPC client bound to the Thrift handler for server_id.

    :rtype common.photon_thrift.Client: proxy client to remote or local
        handler
    """
    # HACK: remove once there is a task based API and thread pools per
    # service. Sending a request to ourselves while our request queue is
    # backed up would block the pending request until the child request
    # times out. Short-circuiting to the local handler works around the
    # trivial case, but the problem persists when a child request goes to
    # a different node that in turn needs something from us -- e.g. a
    # branch scheduler fanning out to a leaf scheduler that fans back to
    # this node for an individual Host's response.
    local_agent_id = common.services.get(ServiceName.AGENT_CONFIG).host_id

    # Local target: hand out the in-process handler directly, no socket.
    if server_id == local_agent_id:
        yield self._handler
        return

    remote_client = None
    try:
        remote_client = DirectClient(self._service_name,
                                     self._client_klass,
                                     host,
                                     port,
                                     client_timeout=client_timeout)
        remote_client.connect()
        yield remote_client
    finally:
        # Close only if construction succeeded; covers both normal
        # completion and generator teardown.
        if remote_client:
            remote_client.close()
def workload(num_vms, additional_places, server, port):
    """Drive placement/create load against a host agent.

    For each of num_vms VMs, issue additional_places extra place calls
    and then create the VM.

    :param num_vms: number of VMs to create
    :param additional_places: extra place() calls per VM before create()
    :param server: agent host address
    :param port: agent port
    """
    client = DirectClient("Host", Host.Client, server, port)
    client.connect()
    # Fix: close the client even when place()/create() raises, so the
    # transport does not leak on failure.
    try:
        for _ in xrange(num_vms):
            vm = VmWrapper(client)
            for _ in xrange(additional_places):
                vm.place()
            vm.create()
    finally:
        client.close()
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Copy an image to another host by way of a temporary shadow VM.

    Streams the source disk out of a shadow VM into a local
    transfer.vmdk, then uploads it to the destination host's agent and
    registers it there as destination_image_id.

    :param image_id: id of the source image
    :param image_datastore: datastore holding the source image
    :param destination_image_id: image id to register on the destination;
        defaults to image_id when None
    :param destination_datastore: target datastore on the destination host
    :param host: destination agent address
    :param port: destination agent port
    :return: id of the import VM created on the destination host
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()

    # place transfer.vmdk under shadow_vm_path to work around VSAN's
    # restriction on files at datastore top-level
    shadow_vm_path = os_datastore_path(
        self._get_shadow_vm_datastore(),
        compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
    transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
    self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)

    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # Always complete the lease, even if the download fails, so the
        # source host releases the disk.
        try:
            self.download_file(disk_url, transfer_vmdk_path, read_lease)
        finally:
            read_lease.Complete()
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(
            vm_id, destination_datastore, vm_path)
        self._send_image(agent_client, host, transfer_vmdk_path, spec)
        self._register_imported_image_at_host(
            agent_client, destination_image_id, destination_datastore,
            vm_id, metadata)
        return vm_id
    finally:
        # Best-effort removal of the local transfer file; it may not
        # exist if an earlier step failed.
        try:
            os.unlink(transfer_vmdk_path)
        except OSError:
            pass
        # NOTE(review): if _delete_shadow_vm() raises, rm_rf() and
        # agent_client.close() are skipped -- confirm whether that is
        # acceptable for this cleanup path.
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(shadow_vm_path)
        if agent_client:
            agent_client.close()
def clear(self):
    """Remove all the VMs and disks on every host in self.hosts.

    For each host, fetch its resources and delete each VM together with
    its attached disks.
    """
    for host in self.hosts:
        client = DirectClient("Host", Host.Client, host, 8835)
        client.connect()
        # Fix: close the client even if a query or delete fails, so a
        # failure on one host does not leak its transport.
        try:
            request = GetResourcesRequest()
            response = rpc_call(client.get_resources, request)
            vm_wrapper = VmWrapper(client)
            for resource in response.resources:
                disk_ids = [disk.id for disk in resource.disks]
                delete_request = Host.DeleteVmRequest(resource.vm.id,
                                                      disk_ids)
                vm_wrapper.delete(request=delete_request)
                vm_wrapper.delete_disks(disk_ids, validate=True)
        finally:
            client.close()
def start_agent(self, config):
    """Launch a photon-controller-agent subprocess and connect to it.

    config - Use get_default_agent_config() to get the default config,
             and modify the dict as needed.

    :return: (process, agent_client, control_client) on success, or
        (None, None, None) if the agent never became reachable
    :raise Exception: if an agent is already listening on the port
    """
    address = config["--hostname"]
    port = int(config["--port"])
    mkdir_p(config["--config-path"])
    # Flatten the config dict into a CLI argument list; entries with a
    # falsy value are emitted as bare flags.
    arg_list = ["photon-controller-agent"]
    for (key, val) in config.items():
        arg_list.append(key)
        if val:
            arg_list.append(val)

    # Keeping track of what is created for clean up purposes
    agent_client = DirectClient("Host", Host.Client, address, port)
    control_client = DirectClient("AgentControl", AgentControl.Client,
                                  address, port)

    # Probe the port first: if a connect succeeds, some agent already
    # owns it. The Exception is not a TTransportException, so it
    # escapes the try and propagates to the caller.
    try:
        agent_client.connect()
        agent_client.close()
        raise Exception("Agent already running on port %s" % port)
    except TTransport.TTransportException:
        pass

    proc = subprocess.Popen(arg_list)
    self.agent_procs.append(proc)

    def wait(process):
        # Reap the child in the background so it does not linger as a
        # zombie.
        if process:
            try:
                os.waitpid(process.pid, os.WUNTRACED)
            except OSError:
                # Process might already exit
                pass

    threading.Thread(target=wait, args=(proc, )).start()

    # Back off on failure to connect to agent
    max_sleep_time = 5
    sleep_time = 0.1
    while sleep_time < max_sleep_time:
        try:
            agent_client.connect()
            control_client.connect()
            return (proc, agent_client, control_client)
        except TTransport.TTransportException:
            time.sleep(sleep_time)
            sleep_time *= 2
    # Agent never came up within the backoff budget; the spawned process
    # is left running and tracked in self.agent_procs for cleanup.
    return (None, None, None)
def start_agent(self, config):
    """Spawn a photon-controller-agent process and return clients to it.

    config - Use get_default_agent_config() to get the default config,
             and modify the dict as needed.

    Returns (process, agent_client, control_client) once both clients
    connect, or (None, None, None) if the agent never became reachable.
    Raises Exception when an agent already answers on the port.
    """
    address = config["--hostname"]
    port = int(config["--port"])
    mkdir_p(config["--config-path"])

    # Turn the config dict into command-line arguments; falsy values
    # become bare flags.
    command = ["photon-controller-agent"]
    for flag, value in config.items():
        command.append(flag)
        if value:
            command.append(value)

    # Clients are created up front so the caller can clean them up.
    agent_client = DirectClient("Host", Host.Client, address, port)
    control_client = DirectClient("AgentControl", AgentControl.Client,
                                  address, port)

    # A successful probe means the port is already taken by some agent;
    # the Exception is not a TTransportException and escapes the try.
    try:
        agent_client.connect()
        agent_client.close()
        raise Exception("Agent already running on port %s" % port)
    except TTransport.TTransportException:
        pass

    proc = subprocess.Popen(command)
    self.agent_procs.append(proc)

    def reap(child):
        # Collect the child's exit status so it is not left a zombie.
        if child:
            try:
                os.waitpid(child.pid, os.WUNTRACED)
            except OSError:
                # Already reaped / already gone.
                pass

    threading.Thread(target=reap, args=(proc,)).start()

    # Exponential backoff while waiting for the agent to accept
    # connections; give up once the delay would reach 5 seconds.
    delay = 0.1
    while delay < 5:
        try:
            agent_client.connect()
            control_client.connect()
            return (proc, agent_client, control_client)
        except TTransport.TTransportException:
            time.sleep(delay)
            delay *= 2
    return (None, None, None)
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Copy an image to another host by way of a temporary shadow VM.

    Streams the source disk out of a shadow VM into a local temporary
    vmdk, then uploads it to the destination host's agent and registers
    it there as destination_image_id.

    :param image_id: id of the source image
    :param image_datastore: datastore holding the source image
    :param destination_image_id: image id to register on the destination;
        defaults to image_id when None
    :param destination_datastore: target datastore on the destination host
    :param host: destination agent address
    :param port: destination agent port
    :return: id of the import VM created on the destination host
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()
    # Temporary transfer file at the datastore top level.
    # NOTE(review): a sibling implementation places this file under the
    # shadow VM folder to satisfy a VSAN restriction on top-level
    # datastore files -- confirm whether this path works on VSAN.
    tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
        self._get_shadow_vm_datastore(), shadow_vm_id)
    self._logger.info("http_disk_transfer: tmp_path = %s" % tmp_path)

    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # Always complete the lease, even if the download fails, so the
        # source host releases the disk.
        try:
            self.download_file(disk_url, tmp_path, read_lease)
        finally:
            read_lease.Complete()
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(
            vm_id, destination_datastore, vm_path)
        self._send_image(agent_client, host, tmp_path, spec)
        self._register_imported_image_at_host(
            agent_client, destination_image_id, destination_datastore,
            vm_id, metadata)
        return vm_id
    finally:
        # Best-effort removal of the local transfer file; it may not
        # exist if an earlier step failed.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        # NOTE(review): if _delete_shadow_vm() raises, rm_rf() and
        # agent_client.close() are skipped -- confirm whether that is
        # acceptable for this cleanup path.
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(
            os_datastore_path(
                self._get_shadow_vm_datastore(),
                compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id)))
        if agent_client:
            agent_client.close()