def get_client(options, ns="Host"): host, port = options.host, options.port print "Connecting %s:%d ..." % (host, port) if ns == "Host": client = DirectClient("Host", Host.Client, host, port) elif ns == "AgentControl": client = DirectClient("AgentControl", AgentControl.Client, host, port) elif ns == "Scheduler": client = DirectClient("Scheduler", Scheduler.Client, host, port) client.connect() return client
def get_client(host, ns="Host"): port = 8835 print "Connecting %s:%d ..." % (host, port) if ns == "Host": client = DirectClient("Host", Host.Client, host, port, validate=False) elif ns == "AgentControl": client = DirectClient("AgentControl", AgentControl.Client, host, port, validate=False) client.connect() return client
def start_agent(self, config):
    """Launch a photon-controller-agent subprocess and connect to it.

    config - Use get_default_agent_config() to get the default config,
    and modify the dict as needed.  Keys are command-line flags
    (e.g. "--hostname", "--port"); a falsy value emits the flag alone.

    Returns (process, agent_client, control_client) once both thrift
    clients connect, or (None, None, None) if the agent never became
    reachable within the backoff window.
    Raises Exception if an agent is already listening on the port.
    """
    address = config["--hostname"]
    port = int(config["--port"])
    mkdir_p(config["--config-path"])
    # Translate the config dict into a flat argv list for the agent.
    arg_list = ["photon-controller-agent"]
    for (key, val) in config.items():
        arg_list.append(key)
        if val:
            arg_list.append(val)
    # Keeping track of what is created for clean up purposes
    agent_client = DirectClient("Host", Host.Client, address, port)
    control_client = DirectClient("AgentControl", AgentControl.Client,
                                  address, port)
    # Probe the port first: a successful connect means another agent is
    # already running there, which we treat as a fatal setup error.
    try:
        agent_client.connect()
        agent_client.close()
        raise Exception("Agent already running on port %s" % port)
    except TTransport.TTransportException:
        pass
    proc = subprocess.Popen(arg_list)
    self.agent_procs.append(proc)

    # Reap the child in a background thread so it never becomes a zombie.
    def wait(process):
        if process:
            try:
                os.waitpid(process.pid, os.WUNTRACED)
            except OSError:
                # Process might already exit
                pass
    threading.Thread(target=wait, args=(proc, )).start()
    # Back off on failure to connect to agent
    max_sleep_time = 5
    sleep_time = 0.1
    while sleep_time < max_sleep_time:
        try:
            agent_client.connect()
            control_client.connect()
            return (proc, agent_client, control_client)
        except TTransport.TTransportException:
            time.sleep(sleep_time)
            sleep_time *= 2
    # Agent never came up; callers must treat this as a failed start.
    return (None, None, None)
def connect(self, host, port, server_id=None,
            client_timeout=DEFAULT_CLIENT_TIMEOUT):
    """Connect client for RPC to Thrift handler

    :rtype common.photon_thrift.Client: proxy client to remote or local
        handler
    """
    # HACK: remove once there is a task based API and thread pools per
    # service.  If the target is this very agent, sending ourselves a
    # thrift request can deadlock: our request queue may already be
    # backed up, so the child request to ourselves blocks until it times
    # out.  Handing back the local handler sidesteps the trivial case,
    # though the problem persists for indirect cycles through another
    # node — e.g. a branch scheduler fanning out to a leaf scheduler
    # that fans back to the node hosting the branch scheduler to get
    # that individual Host's response.
    local_agent_id = common.services.get(ServiceName.AGENT_CONFIG).host_id
    if server_id == local_agent_id:
        yield self._handler
        return

    client = None
    try:
        client = DirectClient(self._service_name, self._client_klass,
                              host, port, client_timeout=client_timeout)
        client.connect()
        yield client
    finally:
        if client is not None:
            client.close()
def get_leaf_scheduler(address, port): """ This function will retrive a leaf scheduler from an agent, if the agent is a leaf scheduler and is online. Otherwise, it will return None if the agent is not a leaf scheduler or isn't available. """ try: leaf_client = DirectClient("AgentControl", AgentControl.Client, address, port) leaf_client.connect() req = GetSchedulersRequest() resp = leaf_client.get_schedulers(req) if resp.result != GetSchedulersResultCode.OK or not resp.schedulers: # Doesn't have a role, not a leaf scheduler return role = resp.schedulers[0].role leaf_sch = Scheduler(role.id, LEAF_SCHEDULER_TYPE) hosts = role.host_children for host in hosts: host = Host(host.id, host.address, host.port, leaf_sch) if address == host.address and port == host.port: leaf_sch.owner = host leaf_sch.add_child(host) return leaf_sch except Exception, e: log.exception(e)
def _get_cgi_ticket(self, host, port, url, http_op=HttpOp.GET):
    """Request an HTTP CGI ticket for *url* from the agent at host:port.

    http_op - the HTTP operation the ticket will authorize.
    Returns the ticket from the agent's response.
    Raises ValueError when the agent does not grant a ticket.
    """
    client = DirectClient("Host", Host.Client, host, port)
    client.connect()
    # Fix: previously the thrift client was never closed, leaking the
    # underlying transport on every call.
    try:
        request = HttpTicketRequest(op=http_op, url="%s" % url)
        response = client.get_http_ticket(request)
        if response.result != HttpTicketResultCode.OK:
            raise ValueError("No ticket")
        return response.ticket
    finally:
        client.close()
def workload(num_vms, additional_places, server, port):
    """Drive a placement/creation workload against the agent at server:port.

    num_vms - number of VMs to create
    additional_places - extra place() calls issued per VM before create()
    """
    client = DirectClient("Host", Host.Client, server, port)
    client.connect()
    # Fix: close the client even when place()/create() raises; the
    # original only closed it on the success path.
    try:
        for _ in xrange(num_vms):
            vm = VmWrapper(client)
            for _ in xrange(additional_places):
                vm.place()
            vm.create()
    finally:
        client.close()
def _get_remote_connections(self, host, port):
    """Open both an agent thrift client and a VIM client to host:port.

    Uses the agent's service-ticket API to obtain VIM credentials.
    Returns (agent_client, vim_client); the caller owns both and must
    close them.
    Raises ValueError when the agent refuses a service ticket.
    """
    agent_client = DirectClient("Host", Host.Client, host, port)
    agent_client.connect()
    # Fix: close the agent client if anything fails before we hand it
    # back to the caller; previously it leaked on the error paths.
    try:
        request = ServiceTicketRequest(service_type=ServiceType.VIM)
        response = agent_client.get_service_ticket(request)
        if response.result != ServiceTicketResultCode.OK:
            self._logger.info("Get service ticket failed. Response = %s" %
                              str(response))
            raise ValueError("No ticket")
        vim_client = VimClient(
            host=host, ticket=response.vim_ticket, auto_sync=False)
    except Exception:
        agent_client.close()
        raise
    return agent_client, vim_client
def _connect_client(self, service, cls, server):
    """ Utility method to connect to a remote agent, retrying with
    exponential backoff (0.1s doubling up to a 32s cap) before failing
    the test.
    """
    delay, limit = 0.1, 32
    while delay < limit:
        try:
            candidate = DirectClient(service, cls, server, 8835)
            candidate.connect()
        except TTransport.TTransportException:
            time.sleep(delay)
            delay *= 2
        else:
            return candidate
    self.fail("Cannot connect to agent %s" % server)
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Copy an image to a remote host via a shadow VM and HTTP transfer.

    Streams the source disk out of a locally created shadow VM into a
    temporary transfer.vmdk, then uploads and registers it on the remote
    agent.  destination_image_id defaults to image_id when None.
    Returns the id of the VM created on the destination to import the
    image.  The shadow VM, its directory, the temp vmdk and the remote
    client are always cleaned up, even on failure.
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()
    # place transfer.vmdk under shadow_vm_path to work around VSAN's
    # restriction on files at datastore top-level
    shadow_vm_path = os_datastore_path(
        self._get_shadow_vm_datastore(),
        compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
    transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
    self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)
    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # The read lease must be completed whether or not the download
        # succeeds, or the source disk stays locked.
        try:
            self.download_file(disk_url, transfer_vmdk_path, read_lease)
        finally:
            read_lease.Complete()
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(
            vm_id, destination_datastore, vm_path)
        self._send_image(agent_client, host, transfer_vmdk_path, spec)
        self._register_imported_image_at_host(
            agent_client, destination_image_id, destination_datastore,
            vm_id, metadata)
        return vm_id
    finally:
        # Best-effort cleanup: temp vmdk may not exist if the download
        # failed early.
        try:
            os.unlink(transfer_vmdk_path)
        except OSError:
            pass
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(shadow_vm_path)
        if agent_client:
            agent_client.close()
def clear(self):
    """Remove all the VMs and disks on every host in self.hosts.

    Connects to each host's agent on port 8835, enumerates its
    resources, and deletes every VM along with its disks.
    """
    for host in self.hosts:
        client = DirectClient("Host", Host.Client, host, 8835)
        client.connect()
        # Fix: close the client even when a delete RPC raises; the
        # original leaked the connection on any error.
        try:
            request = GetResourcesRequest()
            response = rpc_call(client.get_resources, request)
            vm_wrapper = VmWrapper(client)
            for resource in response.resources:
                disk_ids = [disk.id for disk in resource.disks]
                delete_request = Host.DeleteVmRequest(resource.vm.id,
                                                      disk_ids)
                vm_wrapper.delete(request=delete_request)
                vm_wrapper.delete_disks(disk_ids, validate=True)
        finally:
            client.close()
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Copy an image to a remote host via a shadow VM and HTTP transfer.

    Streams the source disk out of a locally created shadow VM into a
    temporary *_transfer.vmdk at the datastore top level, then uploads
    and registers it on the remote agent.  destination_image_id defaults
    to image_id when None.
    Returns the id of the VM created on the destination to import the
    image.  The shadow VM, its directory, the temp vmdk and the remote
    client are always cleaned up, even on failure.
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()
    tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
        self._get_shadow_vm_datastore(), shadow_vm_id)
    self._logger.info("http_disk_transfer: tmp_path = %s" % tmp_path)
    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # The read lease must be completed whether or not the download
        # succeeds, or the source disk stays locked.
        try:
            self.download_file(disk_url, tmp_path, read_lease)
        finally:
            read_lease.Complete()
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(
            vm_id, destination_datastore, vm_path)
        self._send_image(agent_client, host, tmp_path, spec)
        self._register_imported_image_at_host(
            agent_client, destination_image_id, destination_datastore,
            vm_id, metadata)
        return vm_id
    finally:
        # Best-effort cleanup: temp vmdk may not exist if the download
        # failed early.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(
            os_datastore_path(
                self._get_shadow_vm_datastore(),
                compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id)))
        if agent_client:
            agent_client.close()
def send_image_to_host(self, source_image_id, source_datastore,
                       destination_image_id, destination_datastore,
                       destination_host, destination_port):
    """Copy an image to a remote host using an NFC session.

    Obtains an NFC ticket from the remote agent, creates the remote
    image folder, nfc-copies the source vmdk over, and finalizes it.
    destination_image_id defaults to source_image_id when None.
    On any failure after the remote folder exists, the partial remote
    image is cleaned up and the exception re-raised.
    """
    self._logger.info("transfer_image: connecting to remote agent")
    remote_agent_client = DirectClient("Host", Host.Client,
                                       destination_host,
                                       destination_port, 60)
    remote_agent_client.connect()
    # Fix: the remote thrift client was never closed; guarantee it with
    # try/finally around the whole transfer.
    try:
        self._logger.info("transfer_image: getting ticket")
        nfc_ticket = self._get_nfc_ticket(remote_agent_client,
                                          destination_datastore)
        self._logger.info("transfer_image: creating remote image")
        if destination_image_id is None:
            destination_image_id = source_image_id
        upload_folder = self._create_remote_image(remote_agent_client,
                                                  destination_image_id,
                                                  destination_datastore)
        try:
            source_file_path = datastore_path(
                source_datastore,
                compond_path_join(IMAGE_FOLDER_NAME_PREFIX,
                                  source_image_id),
                vmdk_add_suffix(source_image_id))
            destination_file_path = os.path.join(
                upload_folder, vmdk_add_suffix(destination_image_id))
            self._logger.info(
                "transfer_image: nfc copy image %s => (%s)%s, "
                "sslThumbprint=%s, ticket=%s",
                source_file_path, destination_host, destination_file_path,
                nfc_ticket.ssl_thumbprint, nfc_ticket.session_id)
            self._host_client.nfc_copy(source_file_path, destination_host,
                                       destination_file_path,
                                       nfc_ticket.ssl_thumbprint,
                                       nfc_ticket.session_id)
            self._logger.info("transfer_image: finalizing remote image")
            self._finalize_remote_image(remote_agent_client,
                                        destination_image_id,
                                        destination_datastore,
                                        upload_folder)
        except:
            self._logger.info("transfer_image: cleaning up failed transfer")
            self._cleanup_remote_image(remote_agent_client,
                                       destination_datastore,
                                       upload_folder)
            raise
    finally:
        remote_agent_client.close()