def terminate(self, terminate_proxy = 0):

    if self.__debug and self.__verbose:
        print 'PyroProxy.terminate: %s terminating...' % self

    self.__stop = True
    self.__called.set()

    ## if this proxy is used within PyroGrid,
    ## PyroHandler.terminate will clean up by:
    ##
    ## 1. unregister Pyro URI from nameserver
    ## 2. close remote python process
    ##
    ## therefore terminate_proxy should be 0

    if terminate_proxy:

        self.__pyro_lock.acquire()

        ## 1. unregister Pyro URI from nameserver and handler
        ## 2. leaves remote python process intact

        self.__pyro_proxy._release()
        self.__pyro_proxy._pyro_stop = True

        if not PyroUtils.is_stopped(self.__uri, ns = self.__nshost):
            PyroUtils.unregister(self.__uri, ns = self.__nshost)

        if hasattr(self, '_handler'):
            self._handler._release()
            self._handler.instance_uri = None

    while not self.__stopped:
        time.sleep(0.1)
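## A minimal, self-contained sketch of the stop/stopped handshake that
## terminate relies on: the caller raises a stop flag, wakes the worker
## through an Event, then polls a 'stopped' flag until the worker loop
## has really exited.  The _Worker class below is illustrative only and
## is not part of PyroProxy.

def _handshake_sketch():

    import threading, time

    class _Worker:

        def __init__(self):
            self.__stop = False
            self.__stopped = False
            self.__called = threading.Event()
            threading.Thread(target = self.__loop).start()

        def __loop(self):
            while not self.__stop:
                self.__called.wait()      ## sleep until there is work or a stop request
                self.__called.clear()
            self.__stopped = True         ## tell terminate the loop has exited

        def terminate(self):
            self.__stop = True
            self.__called.set()           ## wake the loop so it can see the flag
            while not self.__stopped:
                time.sleep(0.1)

    w = _Worker()
    w.terminate()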
def terminate(self):
    ## called from PyroProxy.terminate or on sys.exit (WatchDog call)

    if self.debug:
        print 'PyroHandler.terminate: %s terminating...' % self

    ##--- call terminate only once:
    ## if the handler has already been terminated (e.g. by hand),
    ## remove it from the exit handlers so it is not called again on exit

    topop = [atexit._exithandlers[i][0] == self.terminate
             for i in range(len(atexit._exithandlers))]

    if True in topop:
        atexit._exithandlers.pop(topop.index(True))

    ##--- unregister from the nameserver
    ## waits up to PyroUtils.default_timeout to let the instance
    ## finish self-unregistration

    if not self.instance_uri == None:
        PyroUtils.unregister(self.instance_uri, ns = self.nshost)

    ##--- shut down the remote process
    ## leaves the interpreter alive if debug == True
    ## PyroHandler itself is an object instance managed
    ## by Pyro.core.Daemon, hence it is stopped by setting
    ## PyroHandler._pyro_stop to True

    self._pyro_stop = True
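## A minimal sketch of the atexit self-deregistration pattern used above,
## assuming Python 2, where the atexit module keeps its callbacks in the
## private list atexit._exithandlers as (func, args, kwargs) tuples.
## The Service class is illustrative only.

def _atexit_sketch():

    import atexit

    class Service:

        def __init__(self):
            ## make sure terminate runs at interpreter exit
            atexit.register(self.terminate)

        def terminate(self):
            ## if terminate is called by hand, drop it from the exit
            ## handlers so it does not run a second time at exit
            topop = [entry[0] == self.terminate
                     for entry in atexit._exithandlers]
            if True in topop:
                atexit._exithandlers.pop(topop.index(True))
            print 'Service terminated'

    s = Service()
    s.terminate()   ## manual call: runs once, and not again at exit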
def _connect_instances(self, service_id, \
                       launched_instances, launched_instances_lock, \
                       self_handlers_lock, servers_lock):

    ## connect to instances sequentially

    _ns = PyroUtils.get_nameserver(self.nshost)

    for i in range(self.n_hosts):

        launched_instances_lock.acquire()
        if launched_instances.empty():
            launched_instances_lock.wait()
        instance_uri = launched_instances.get()
        launched_instances_lock.release()

        try:
            instance_proxy = PyroUtils.get_proxy(instance_uri, ns = _ns)
        except:
            print 'PyroGrid._connect_instances: failed to connect to "%s"' % instance_uri
            raise

        if self.debug:
            print 'PyroGrid._connect_instances: instance "%s" on "%s" is ready' \
                  % (instance_uri, instance_proxy._pyro_suppl['host'])

        self._append_instance(service_id, instance_proxy, \
                              self_handlers_lock, servers_lock)

    self.__published[service_id] = True
def _connect_handlers(self, handler_uris, handlers, handlers_lock, hosts_map):
    # handlers: a FIFO queue
    # handlers_lock: a Condition lock
    # handler (see below): the PyroHandler proxy

    ## connect to handlers sequentially

    _ns = PyroUtils.get_nameserver(self.nshost)

    for i in range(self.n_hosts):

        try:
            handler = PyroUtils.get_proxy(handler_uris[i], ns = _ns)
        except:
            print 'PyroGrid._connect_handlers: failed to connect to "%s"' % handler_uris[i]
            raise

        # tell the Pyro proxy to close the socket for now
        handler._release()

        # tell the handler on which host it is running
        handler.host = hosts_map[handler_uris[i]]

        if self.debug:
            print 'PyroGrid._connect_handlers: handler "%s" on "%s" is ready' \
                  % (handler_uris[i], handler._pyro_suppl['host'])

        # acquire the lock
        handlers_lock.acquire()

        # put the handler proxy object into the queue
        handlers.put(handler)

        if self.debug:
            print 'PyroGrid._connect_handlers: notifies "%s" is in the queue' % handler_uris[i]

        # notify waiting consumers and release the lock
        handlers_lock.notify()
        handlers_lock.release()
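## A minimal sketch of the Condition/Queue handshake shared by
## _connect_handlers (producer side) and _connect_instances (consumer
## side): the producer puts an item and calls notify() under the lock,
## the consumer waits under the same lock while the queue is empty.
## Names and the payload below are illustrative only.

def _queue_handshake_sketch():

    import Queue, threading, time

    queue = Queue.Queue(-1)
    lock = threading.Condition()

    def producer():
        for i in range(3):
            time.sleep(0.1)          ## stands in for the slow remote call
            lock.acquire()
            queue.put(i)
            lock.notify()            ## wake a waiting consumer
            lock.release()

    def consumer():
        for i in range(3):
            lock.acquire()
            if queue.empty():
                lock.wait()          ## lock is released while waiting, reacquired on notify
            item = queue.get()
            lock.release()
            print 'consumed', item

    t = threading.Thread(target = producer)
    t.start()
    consumer()
    t.join()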
def start_handlers(self, service_id):
    """
    Starts a PyroHandler on each host (non-blocking) and returns a FIFO
    queue, filled with their Pyro proxies by a separate thread, together
    with the Condition lock that guards it.
    """
    # create a list of unique URIs, one per host
    handler_uris = PyroUtils.create_unique_uri(\
        string.join(('PyroHandler', service_id), '.'),\
        n = self.n_hosts, ns = self.nshost)

    # create a FIFO queue of infinite capacity
    handlers = Queue.Queue(-1)

    # create the condition lock, see effbot.org/zone/thread-synchronization.htm
    handlers_lock = threading.Condition()

    hosts_map = {}

    for i in range(self.n_hosts):

        # ssh to each host and fork a PyroHandler
        self._launch_handler(handler_uris[i], self.hosts[i])
        hosts_map[handler_uris[i]] = self.hosts[i]

        ## sleep here is to avoid errors like
        ## "Warning: No xauth data; using fake
        ## authentication data for X11 forwarding."
        # TODO: speed things up by checking whether the delay is necessary
        time.sleep(self.X11_delay)

    # fill the queue asynchronously with proxies for the handlers that were launched
    t = threading.Thread(target = self._connect_handlers, \
                         args = (handler_uris, handlers, handlers_lock, hosts_map))
    t.start()

    return handlers, handlers_lock
def start_instances(self, service_id, instance, handlers, handlers_lock):

    # create n unique URIs for the instance, derived from its service_id
    instance_uris = PyroUtils.create_unique_uri(\
        service_id, n = self.n_hosts, ns = self.nshost)

    # queue and lock for managing the launched copies of the 'instance'
    # object published via the PyroHandlers
    launched_instances = Queue.Queue(-1)
    launched_instances_lock = threading.Condition()

    instances = Queue.Queue(-1)

    # lock for the self.handlers dict
    self_handlers_lock = threading.Condition()

    servers_lock = threading.Condition()

    # call PyroHandler.publish(instance) on each host
    t0 = threading.Thread(target = self._launch_instances, \
                          args = (instance, instance_uris, handlers, handlers_lock,\
                                  launched_instances, launched_instances_lock, \
                                  self_handlers_lock))
    t0.start()

    # create a PyroServer instance for each host and put them into
    #   self.queues:  dict[service_id of the published instance] = Queue(-1)
    #                 containing the DerivedServers
    #   self.servers: dict[service_id of the published instance] = list of
    #                 PyroServer instances
    t1 = threading.Thread(target = self._connect_instances, \
                          args = (service_id, \
                                  launched_instances, launched_instances_lock, \
                                  self_handlers_lock, servers_lock))
    t1.start()

    return servers_lock
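## A hedged usage sketch (not taken from the sources): based on the
## signatures above, publishing an object on the grid would roughly look
## like this -- start the handlers first, then hand their queue and lock
## to start_instances.  'grid', 'service_id' and 'instance' are assumed
## names for illustration only.

def _publish_sketch(grid, service_id, instance):

    ## launch one PyroHandler per host; the queue fills asynchronously
    handlers, handlers_lock = grid.start_handlers(service_id)

    ## publish 'instance' through the handlers as they become available
    servers_lock = grid.start_instances(service_id, instance,
                                        handlers, handlers_lock)

    return servers_lock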