def _connectToNode( self, host, isSeed = False ):
    host = socket.gethostbyname( host )
    nodeSocket = _ZMREQ( 'tcp://%s:%d' % ( host, self._opsPort ),
                         isBind = False,
                         private_key = self._private_key )
    self._nodes[ host ] = { 'socket' : nodeSocket, 'info' : None, 'is_seed' : isSeed }
    print( "Connected to node ops at: %s:%d" % ( host, self._opsPort ) )
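# A minimal, self-contained sketch of the address construction above: the
# hostname is resolved once up front and the ops channel URL is built from the
# resolved IP. The port value here is illustrative, not taken from the source.
import socket

opsPort = 4999
resolved = socket.gethostbyname( 'localhost' )
opsUrl = 'tcp://%s:%d' % ( resolved, opsPort )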
def _startInstance( self, isIsolated = False ):
    instanceId = str( uuid.uuid4() )
    procSocket = _ZMREQ( 'ipc:///tmp/py_beach_instance_%s' % instanceId, isBind = False )
    instance = { 'socket' : procSocket, 'p' : None, 'isolated' : isIsolated, 'id' : instanceId }
    self.processes.append( instance )
    self._log( "Managing instance at: %s" % ( 'ipc:///tmp/py_beach_instance_%s' % instanceId, ) )
    return instance
def _setHostDirInfo( cls, zHostDir, private_key = None ):
    cls._private_key = private_key
    if type( zHostDir ) is not tuple and type( zHostDir ) is not list:
        zHostDir = ( zHostDir, )
    if cls._zHostDir is None:
        cls._zHostDir = zHostDir
        for h in zHostDir:
            cls._zDir.append( _ZMREQ( h, isBind = False, private_key = cls._private_key ) )
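# A minimal sketch of the normalization above: a lone endpoint string is
# wrapped into a one-element tuple so the connect loop can treat single and
# multiple directory hosts uniformly. The URL is illustrative.
zHostDir = 'tcp://127.0.0.1:4999'
if type( zHostDir ) is not tuple and type( zHostDir ) is not list:
    zHostDir = ( zHostDir, )
assert zHostDir == ( 'tcp://127.0.0.1:4999', )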
def _updateDirectory( self ):
    newDir = self._getDirectory( self._realm, self._cat )
    if newDir is not False:
        self._endpoints = newDir
        if 'affinity' != self._mode:
            for z_ident, z_url in self._endpoints.items():
                if z_ident not in self._peerSockets:
                    self._peerSockets[ z_ident ] = _ZMREQ( z_url,
                                                           isBind = False,
                                                           private_key = self._private_key,
                                                           congestionCB = self._reportCongestion )
        if not self._initialRefreshDone.isSet():
            self._initialRefreshDone.set()
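# Self-contained sketch of the incremental cache pattern above: only
# identities not already connected get a new socket, so existing peer sockets
# survive directory refreshes. The connect() stand-in and the dict contents
# are illustrative.
def connect( url ):
    return 'socket-for-%s' % url

peerSockets = { 'id_a' : connect( 'tcp://10.0.0.1:5000' ) }
endpoints = { 'id_a' : 'tcp://10.0.0.1:5000', 'id_b' : 'tcp://10.0.0.2:5000' }
for z_ident, z_url in endpoints.items():
    if z_ident not in peerSockets:
        peerSockets[ z_ident ] = connect( z_url )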
def _getDirectory( cls, realm, cat ):
    msg = False
    if 0 != len( cls._zDir ):
        while msg is False:
            iZ = random.randint( 0, len( cls._zDir ) - 1 )
            z = cls._zDir[ iZ ]
            # These requests can be sent to the directory service of a HostManager
            # or the ops service of the HostManager. Directory service is OOB from the
            # ops but is only available locally to Actors. The ops is available from outside
            # the host. So if the ActorHandle is created by an Actor, it goes to the dir_svc
            # and if it's created from outside components through a Beach it goes to
            # the ops.
            msg = z.request( data = { 'req' : 'get_dir', 'realm' : realm, 'cat' : cat }, timeout = 10 )
            if isMessageSuccess( msg ) and 'endpoints' in msg[ 'data' ]:
                msg = msg[ 'data' ][ 'endpoints' ]
            else:
                msg = False
                cls._zDir[ iZ ] = _ZMREQ( cls._zHostDir[ iZ ], isBind = False, private_key = cls._private_key )
                z.close()
                gevent.sleep( 1 )
    return msg
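# Shape sketch: per the parsing above, a successful 'get_dir' reply carries a
# map of actor identities to endpoint URLs under msg[ 'data' ][ 'endpoints' ];
# the success envelope itself is validated by isMessageSuccess(), whose exact
# layout is not shown here. Identities and URLs are illustrative.
exampleData = { 'endpoints' : { 'actor-ident-1' : 'tcp://10.0.0.1:5000',
                                'actor-ident-2' : 'tcp://10.0.0.2:5000' } }
endpoints = exampleData[ 'endpoints' ] if 'endpoints' in exampleData else False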
def _startInstance( self, isIsolated = False ):
    instance = { 'socket' : None,
                 'p' : None,
                 'isolated' : isIsolated,
                 'id' : str( uuid.uuid4() ),
                 'start' : time.time() }
    instance[ 'socket' ] = _ZMREQ( 'ipc:///tmp/py_beach_instance_%s' % instance[ 'id' ], isBind = False )
    proc = subprocess.Popen( [ 'python',
                               '%s/actorhost.py' % self.py_beach_dir,
                               self.configFilePath,
                               instance[ 'id' ],
                               str( self._log_level ),
                               self._log_dest,
                               self.interface ],
                             close_fds = True )
    instance[ 'p' ] = proc
    instance[ 'start' ] = time.time()
    self.processes.append( instance )
    self._log( "Managing instance at: %s" % ( 'ipc:///tmp/py_beach_instance_%s' % instance[ 'id' ], ) )
    return instance
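# Sketch of the rendezvous convention above: each instance gets a per-uuid
# ipc endpoint, so the manager and the spawned actorhost process meet on a
# predictable filesystem path.
import uuid

instanceId = str( uuid.uuid4() )
ipcUrl = 'ipc:///tmp/py_beach_instance_%s' % instanceId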
def _connectToNode( self, ip ):
    ip = socket.gethostbyname( ip )
    nodeSocket = _ZMREQ( 'tcp://%s:%d' % ( ip, self.opsPort ), isBind = False, private_key = self.private_key )
    self.nodes[ ip ] = { 'socket' : nodeSocket, 'last_seen' : None }
def _connectToNode( self, ip ):
    nodeSocket = _ZMREQ( 'tcp://%s:%d' % ( ip, self.opsPort ), isBind = False )
    self.nodes[ ip ] = { 'socket' : nodeSocket, 'last_seen' : None }
def request( self, requestType, data = {}, timeout = None, key = None, nRetries = None,
             isWithFuture = False, onFailure = None ):
    '''Issue a request to the actor category of this handle.

    :param requestType: the type of request to issue
    :param data: a dict of the data associated with the request
    :param timeout: the number of seconds to wait for a response
    :param key: when used in 'affinity' mode, the key is the main parameter to
        evaluate to determine which Actor to send the request to, in effect it
        is the key to the hash map of Actors
    :param nRetries: the number of times the request will be re-sent if it
        times out, meaning a timeout of 5 and a retry of 3 could result in a
        request taking 15 seconds to return
    :param isWithFuture: return a Future instead of the actual response
    :param onFailure: execute this function callback on failure with a single
        argument that is the message
    :returns: the response to the request as an ActorResponse
    '''
    z = None
    z_ident = None
    ret = False
    curRetry = 0
    affinityKey = None

    # Short-circuit for cases where a category just isn't populated.
    if self._initialRefreshDone.isSet() and 0 == len( self._endpoints ):
        return ActorResponse( False )

    if nRetries is None:
        nRetries = self._nRetries
        if nRetries is None:
            nRetries = 0

    if timeout is None:
        timeout = self._timeout
    if 0 == timeout:
        timeout = None

    if isWithFuture:
        futureResult = FutureResults( 1 )
        self._threads.add( gevent.spawn( withLogException( self._requestToFuture, actor = self._fromActor ),
                                         futureResult,
                                         requestType,
                                         data = data,
                                         timeout = timeout,
                                         key = key,
                                         nRetries = nRetries,
                                         isWithFuture = False,
                                         onFailure = onFailure ) )
        return futureResult

    while curRetry <= nRetries:
        try:
            # We use the timeout to wait for an available node if none exists.
            with gevent.Timeout( timeout, _TimeoutException ):
                self._initialRefreshDone.wait( timeout = timeout if timeout is not None else self._timeout )

                while z is None:
                    if 'affinity' == self._mode and key is not None:
                        # Affinity is currently a soft affinity, meaning the set of Actors
                        # is not locked; if it changes, affinity is re-computed without
                        # migrating any previous affinities. Therefore, I suggest a good
                        # cooldown before starting to process with affinity after the
                        # Actors have been spawned.
                        orderedEndpoints = sorted( self._endpoints.items(),
                                                   key = lambda x: x.__getitem__( 0 ) )
                        orderHash = tuple( [ x[ 0 ] for x in orderedEndpoints ] ).__hash__()
                        if self._affinityOrder is None or self._affinityOrder != orderHash:
                            self._affinityOrder = orderHash
                            self._affinityCache = {}
                        if 0 != len( orderedEndpoints ):
                            affinityKey = ( hash( key ) % len( orderedEndpoints ) )
                            if affinityKey in self._affinityCache:
                                z, z_ident = self._affinityCache[ affinityKey ]
                            else:
                                z_ident, z = orderedEndpoints[ affinityKey ]
                                z = _ZMREQ( z, isBind = False,
                                            private_key = self._private_key,
                                            congestionCB = self._reportCongestion )
                                if z is not None:
                                    self._affinityCache[ affinityKey ] = z, z_ident
                    else:
                        if 'random' == self._mode:
                            try:
                                # list() keeps the indexing below valid on Python 3 dict views as well.
                                endpoints = list( self._endpoints.keys() )
                                z_ident = endpoints[ random.randint( 0, len( endpoints ) - 1 ) ]
                                z = self._peerSockets[ z_ident ]
                            except:
                                z = None
                                z_ident = None
                    if z is None:
                        gevent.sleep( 0.1 )
        except _TimeoutException:
            curRetry += 1

        if z is not None and curRetry <= nRetries:
            envelope = { 'data' : data,
                         'mtd' : { 'ident' : self._ident,
                                   'req' : requestType,
                                   'id' : str( uuid.uuid4() ),
                                   'dst' : z_ident } }
            #qStart = time.time()
            ret = self._accountedSend( z, z_ident, envelope, timeout )
            ret = ActorResponse( ret )

            # If we hit a timeout or a wrong dest we don't take chances
            # and remove that socket.
            if not ret.isTimedOut and ( ret.isSuccess or ret.error != 'wrong dest' ):
                break
            else:
                #self._log( "Received failure (%s:%s) after %s: %s" % ( self._cat, requestType, ( time.time() - qStart ), str( ret ) ) )
                if 999 == z.growthHist[ 0 ] or ret.error == 'wrong dest':
                    # There has been no new response in the last history timeframe,
                    # or it's a wrong dest.
                    self._log( "Bad destination, recycling." )
                    if 'affinity' == self._mode:
                        self._affinityCache.pop( affinityKey, None )
                    else:
                        self._peerSockets.pop( z_ident, None )
                    z.close()
                    z = None
                curRetry += 1
                if ret.error == 'wrong dest':
                    self._updateDirectory()

    if ret is None or ret is False:
        ret = ActorResponse( ret )

    if ret.isTimedOut:
        self._log( "Request failed after %s retries." % curRetry )

    if not ret.isSuccess and onFailure is not None:
        onFailure( data )

    return ret
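# A minimal, self-contained sketch of the soft-affinity routing used by
# request() above: endpoints are sorted by identity and the caller's key is
# hashed onto that ordered list. Identities and URLs are illustrative.
endpoints = { 'id_b' : 'tcp://10.0.0.2:5000', 'id_a' : 'tcp://10.0.0.1:5000' }
orderedEndpoints = sorted( endpoints.items(), key = lambda x: x[ 0 ] )
affinityKey = hash( 'customer-42' ) % len( orderedEndpoints )
z_ident, z_url = orderedEndpoints[ affinityKey ]
# If the endpoint set changes, the modulo (and thus the mapping) can change,
# which is why the code above treats this as a "soft" affinity and clears its
# cache whenever the hash of the ordered identities differs.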
def _connectToNode( self, host ):
    nodeSocket = _ZMREQ( 'tcp://%s:%d' % ( host, self._opsPort ), isBind = False )
    self._nodes[ host ] = { 'socket' : nodeSocket, 'info' : None }
    print( "Connected to node ops at: %s:%d" % ( host, self._opsPort ) )