class GridCacheController(ACICacheControllerABC):
    """ Grid Cache Controller

    The grid cache controller stores all the grids in memory,
    allowing fast access from the mobile and desktop devices.

    NOTE: The grid set endpoint triggers a reload, this is for the case when
    grid sets are enabled or disabled. Perhaps the system should just be
    restarted instead.

    """

    _ChunkedTuple = EncodedGridTuple
    _chunkLoadRpcMethod = ClientGridLoaderRpc.loadGrids
    _updateFromServerFilt = clientGridUpdateFromServerFilt
    _logger = logger

    #: This stores the cache of grid data for the clients
    _cache: Dict[str, GridTuple] = None

    _LOAD_CHUNK_SIZE = 75
    _LOAD_CHUNK_PARALLELISM = 4

    def __init__(self, clientId: str):
        ACICacheControllerABC.__init__(self, clientId)

        self._coordSetEndpoint = PayloadEndpoint(
            clientCoordSetUpdateFromServerFilt, self._processCoordSetPayload)

    def _processCoordSetPayload(self, *args, **kwargs):
        d: Deferred = self.reloadCache()
        d.addErrback(vortexLogFailure, logger, consumeError=True)

    def shutdown(self):
        ACICacheControllerABC.shutdown(self)

        self._coordSetEndpoint.shutdown()
        self._coordSetEndpoint = None
def testClassStaysInScope(self):
    filt = {'key': 'unittest', 'This matches': 555}

    payload = Payload()
    payload.filt = filt

    inst = PayloadEndpointPyTestAssignPayload(self)
    PayloadEndpoint(filt, inst.process)

    PayloadIO().process(payloadEnvelope=payload.makePayloadEnvelope(),
                        vortexUuid='test',
                        vortexName='test',
                        httpSession=None,
                        sendResponse=lambda x: x)

    self.assertEqual(
        self.deliveredPayloadEnvelope, payload,
        'PayloadIO/PayloadEndpoint unmatched value test error')
def _testBuild(self, plFilt, epFilt):
    payload = Payload()
    payload.filt.update(plFilt)
    for x in range(6):
        payload.filt['%s' % x] = x

    def processPayload(payloadEnvelope: PayloadEnvelope, **kwargs):
        self.deliveredPayloadEnvelope = payloadEnvelope

    self._keepFuncInScope = processPayload

    PayloadEndpoint(epFilt, processPayload)

    PayloadIO().process(payloadEnvelope=payload.makePayloadEnvelope(),
                        vortexUuid='test',
                        vortexName='test',
                        httpSession=None,
                        sendResponse=lambda x: x)

    return payload
def subScope():
    def outOfScopeFunc(payloadEnvelope: PayloadEnvelope, *args, **kwargs):
        self.deliveredPayloadEnvelope = payloadEnvelope

    PayloadEndpoint(filt, outOfScopeFunc)
def _vortexTestTcpServerClient(
        self,
        printStatusEveryXMessage: Optional[int] = 1000,
        maxMessageSizeBytes: Optional[int] = None,
        exactMessageSizeBytes: Optional[int] = None,
        totalBytesToSend: Optional[int] = None,
        totalMessagesToSend: Optional[int] = None,
        payloadCompression=9,
        sendFromServerOnly=False):
    assert totalBytesToSend or totalMessagesToSend, "We must have a total to send"
    assert maxMessageSizeBytes or exactMessageSizeBytes, "We must have a msg size"

    state = self._State(self, sendFromServerOnly)
    checker = self._Checker(self, state, printStatusEveryXMessage)

    endpoint = PayloadEndpoint(self._loopbackFilt, checker.process)
    checker.setEndpoint(endpoint)

    # Make random chunks of data
    def makeData():
        size = random() * maxMessageSizeBytes \
            if exactMessageSizeBytes is None \
            else exactMessageSizeBytes

        packet = str(random())
        while len(packet) < size:
            packet += str(random())

        if exactMessageSizeBytes is not None:
            packet = packet[:exactMessageSizeBytes]

        return packet

    def check():
        if totalMessagesToSend and state.totalMessagesSent < totalMessagesToSend:
            return True

        if totalBytesToSend and state.totalSent < totalBytesToSend:
            return True

        return False

    # Send the data
    while check():
        data = makeData()
        state.dataQueue.append(data)
        state.totalSent += len(data)
        state.totalMessagesSent += 1

        vortexMsg = yield Payload(self._loopbackFilt, data) \
            .makePayloadEnvelopeVortexMsgDefer(compressionLevel=payloadCompression)

        state.dataSizeStats.append(len(data))
        state.vortexSizeStats.append(len(vortexMsg))

        # We could send this from either the vortexClient or vortexServer
        # We only have one PayloadIO that the PayloadEndpoint binds to anyway
        state.vortex(len(data)).sendVortexMsg(vortexMsg)

        if not (state.totalMessagesSent % printStatusEveryXMessage):
            print("Sent message %s, this %s, total %s" %
                  (state.totalMessagesSent, "{:,d}".format(len(data)),
                   "{:,d}".format(state.totalSent)))

    # Wait for all the sending to complete
    yield state.dataQueueEmptyDeferred

    print("%s messages were out of order, the most out of order was %s" %
          (state.totalMessagesOutOfOrder, state.highestOutOfOrderIndex))

    print("Sent %s from the vortex client" %
          "{:,d}".format(state.totalSentFromClient))

    print("Sent %s from the vortex server" %
          "{:,d}".format(state.totalSentFromServer))

    print(
        "Data : count %s, total size %s, max size %s, min size %s, average %s"
        % (state.totalMessagesSent, "{:,d}".format(state.totalSent),
           "{:,d}".format(max(state.dataSizeStats)),
           "{:,d}".format(min(state.dataSizeStats)),
           "{:,d}".format(int(state.totalSent / state.totalMessagesSent))))

    totalVortexMsgs = sum(state.vortexSizeStats)
    print(
        "VortexMsg: count %s, total size %s, max size %s, min size %s, average %s"
        % (state.totalMessagesSent, "{:,d}".format(totalVortexMsgs),
           "{:,d}".format(max(state.vortexSizeStats)),
           "{:,d}".format(min(state.vortexSizeStats)),
           "{:,d}".format(int(totalVortexMsgs / state.totalMessagesSent))))

    # Run our checks
    self.assertEqual(state.totalSent, state.totalReceived)
    self.assertFalse(len(state.dataQueue))

    # Cleanup
    endpoint.shutdown()
    del checker
    del state
    del endpoint
    gc.collect()
def subScope():
    inst = PayloadEndpointPyTestAssignPayload(self)
    PayloadEndpoint(filt, inst.process)
class TupleActionProcessor:
    def __init__(self, tupleActionProcessorName: str,
                 additionalFilt: Optional[Dict] = None,
                 defaultDelegate: Optional[TupleActionProcessorDelegateABC] = None,
                 acceptOnlyFromVortex: Optional[Union[str, tuple]] = None,
                 usedForProxy__=False) -> None:
        """ Constructor

        :param tupleActionProcessorName: The name of this action processor

        :param additionalFilt: Any additional filter keys that are required

        :param defaultDelegate: The default delegate to send all actions to

        :param acceptOnlyFromVortex: Accept requests only from this vortex.
            The vortex can be str or tuple of str, or None to accept from any.

        """
        self._tupleActionProcessorName = tupleActionProcessorName
        self._defaultDelegate = defaultDelegate

        self._tupleProcessorsByTupleName: Dict[str, TupleActionProcessorDelegateABC] = {}

        if not usedForProxy__:
            self._filt = dict(name=tupleActionProcessorName,
                              key="tupleActionProcessorName")

            if additionalFilt:
                self._filt.update(additionalFilt)

            self._endpoint = PayloadEndpoint(
                self._filt, self._process,
                acceptOnlyFromVortex=acceptOnlyFromVortex)

    def setDelegate(self, tupleName: str,
                    processor: TupleActionProcessorDelegateABC):
        """ Add Tuple Action Processor Delegate

        :param tupleName: The tuple name to process actions for.
        :param processor: The processor to use for processing this tuple name.

        """
        assert not tupleName in self._tupleProcessorsByTupleName, (
            "TupleActionProcessor:%s, Tuple name %s is already registered" %
            (self._tupleActionProcessorName, tupleName))

        assert isinstance(processor, TupleActionProcessorDelegateABC), (
            "TupleActionProcessor:%s, provider must be an"
            " instance of TupleActionProcessorDelegateABC" %
            self._tupleActionProcessorName)

        self._tupleProcessorsByTupleName[tupleName] = processor

    @property
    def delegateCount(self) -> int:
        return len(self._tupleProcessorsByTupleName)

    def hasDelegate(self, tupleName: str) -> bool:
        return tupleName in self._tupleProcessorsByTupleName

    def shutdown(self):
        self._endpoint.shutdown()

    @inlineCallbacks
    def _process(self, payloadEnvelope: PayloadEnvelope,
                 sendResponse: SendVortexMsgResponseCallable, **kwargs):
        """ Process the Payload / Tuple Action
        """
        payload = yield payloadEnvelope.decodePayloadDefer()

        assert len(payload.tuples) == 1, (
            "TupleActionProcessor:%s Expected 1 tuples, received %s" %
            (self._tupleActionProcessorName, len(payload.tuples)))

        tupleAction = payload.tuples[0]
        self._processTupleAction(payloadEnvelope.filt, sendResponse, tupleAction)

    def _processTupleAction(self, payloadEnvelopeFilt, sendResponse, tupleAction):
        assert isinstance(tupleAction, TupleActionABC), \
            "TupleActionProcessor:%s Expected TupleAction, received %s" \
            % (self._tupleActionProcessorName, tupleAction.__class__)

        tupleName = tupleAction.tupleName()

        processor = self._tupleProcessorsByTupleName.get(tupleName)
        if processor:
            delegate = processor.processTupleAction

        elif self._defaultDelegate:
            delegate = self._defaultDelegate.processTupleAction

        else:
            raise Exception("No delegate registered for %s" % tupleName)

        d = self._customMaybeDeferred(delegate, tupleAction)
        d.addCallback(self._callback, payloadEnvelopeFilt, tupleName, sendResponse)
        d.addErrback(self._errback, payloadEnvelopeFilt, tupleName, sendResponse)

    @inlineCallbacks
    def _callback(self, result, replyFilt: dict, tupleName: str,
                  sendResponse: SendVortexMsgResponseCallable):

        if not isinstance(result, list):
            result = [result]

        payload = Payload(filt=replyFilt, tuples=result)
        payloadEnvelope = yield payload.makePayloadEnvelopeDefer()
        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()

        try:
            yield sendResponse(vortexMsg)

        except Exception as e:
            logger.error("Failed to send TupleAction response for %s", tupleName)
            logger.exception(e)

    @inlineCallbacks
    def _errback(self, result: Failure, replyFilt: dict, tupleName: str,
                 sendResponse: SendVortexMsgResponseCallable):

        logger.error("TupleActionProcessor:%s Failed to process TupleAction",
                     self._tupleActionProcessorName)
        vortexLogFailure(result, logger)

        failureMessage = result.getErrorMessage()

        payloadEnvelope = PayloadEnvelope(filt=replyFilt, result=failureMessage)
        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()

        try:
            yield sendResponse(vortexMsg)

        except Exception as e:
            logger.error("Failed to send TupleAction response for %s\n%s",
                         tupleName, failureMessage)
            logger.exception(e)

    def _customMaybeDeferred(self, f, *args, **kw):
        try:
            result = f(*args, **kw)

        except Exception as e:
            return fail(failure.Failure(e))

        if isinstance(result, Deferred):
            return result

        elif isinstance(result, failure.Failure):
            return fail(result)

        else:
            return succeed(result)
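
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of wiring a delegate into TupleActionProcessor. The
# "DoSomethingAction" tuple name and "DoSomethingDelegate" class are
# hypothetical; only TupleActionProcessor, TupleActionProcessorDelegateABC
# and TupleActionABC come from the code above.

class DoSomethingDelegate(TupleActionProcessorDelegateABC):
    def processTupleAction(self, tupleAction: TupleActionABC):
        # Return a value (or a Deferred); _callback wraps the result in a
        # Payload and sends it back to the calling vortex.
        return [tupleAction]

actionProcessor = TupleActionProcessor("exampleActionProcessor")
actionProcessor.setDelegate("DoSomethingAction", DoSomethingDelegate())
# ... later, on shutdown:
# actionProcessor.shutdown()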
def __init__(self, payloadFilter):
    self._ep = PayloadEndpoint(payloadFilter, self._process)
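
# --- Minimal handler sketch (illustrative, not from the source) -------------
# The __init__ above only shows the endpoint being created. A complete
# handler in this style typically looks like the hypothetical class below;
# "ExampleHandler" is a made-up name.

class ExampleHandler:
    def __init__(self, payloadFilter):
        self._ep = PayloadEndpoint(payloadFilter, self._process)

    def _process(self, payloadEnvelope: PayloadEnvelope, vortexName: str,
                 sendResponse, **kwargs):
        # Called for every payload whose filt matches payloadFilter
        logger.debug("Received payload with filt %s", payloadEnvelope.filt)

    def shutdown(self):
        # The scope tests above suggest the endpoint only weakly references
        # its callback; an explicit shutdown stops it listening immediately.
        self._ep.shutdown()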
class OrmCrudHandler(object):
    UPDATE = 1
    CREATE = 2
    DELETE = 3
    QUERY = 4

    def __init__(self, sessionFunctor, Declarative, payloadFilter,
                 retreiveAll=False):
        ''' Create CRUD Handler

        This handler will perform CRUD operations for an SQLAlchemy Declarative

        '''
        self._sessionFunctor = sessionFunctor
        self._Declarative = Declarative
        self._payloadFilter = (payloadFilter
                               if isinstance(payloadFilter, dict)
                               else {"key": payloadFilter})

        self._ep = PayloadEndpoint(self._payloadFilter, self._process)

        self._ext = _OrmCrudExtensionProcessor()

        self._retreiveAll = retreiveAll

    def shutdown(self):
        self._ep.shutdown()

    def addExtension(self, Tuple, ormCrudHandlerExtension=None):
        if ormCrudHandlerExtension:
            self._ext.addExtensionObject(Tuple, ormCrudHandlerExtension)
            return

        return self._ext.addExtensionClassDecorator(Tuple)

    @inlineCallbacks
    def _process(self, *args, **kwargs):
        val = yield self._processInThread(*args, **kwargs)
        return val

    @deferToThreadWrapWithLogger(logger)
    def _processInThread(self, payloadEnvelope: PayloadEnvelope,
                         vortexUuid: str,
                         sendResponse: Callable[[Union[VortexMsgList, bytes]], None],
                         **kwargs):
        # Execute preprocess functions
        if self.preProcess(payloadEnvelope, vortexUuid, **kwargs) != None:
            return

        # Create the reply payload filt
        replyFilt = copy(self._payloadFilter)
        if payloadEnvelope.filt:
            replyFilt.update(payloadEnvelope.filt)

        # Get data from the payload
        phId = payloadEnvelope.filt.get(plIdKey)
        delete = payloadEnvelope.filt.get(plDeleteKey, False)

        # Setup variables to populate
        replyPayloadEnvelope: PayloadEnvelope = None
        action = None

        tuples = []
        if payloadEnvelope.encodedPayload:
            tuples = payloadEnvelope.decodePayload().tuples

        session = self._getSession()

        # Execute the action
        try:
            if delete == True:
                action = self.DELETE
                replyPayloadEnvelope = self._delete(session, tuples, phId,
                                                    payloadEnvelope.filt)

            elif len(tuples):
                action = self.UPDATE
                replyPayloadEnvelope = self._update(session, tuples,
                                                    payloadEnvelope.filt)

            elif phId != None:
                action = self.QUERY
                replyPayloadEnvelope = self._retrieve(session, phId,
                                                      payloadEnvelope.filt)

            elif len(tuples) == 0:
                action = self.CREATE
                replyPayloadEnvelope = self._create(session, payloadEnvelope.filt)

            else:
                session.close()
                raise Exception("Invalid ORM CRUD parameter state")

        except Exception as e:
            replyPayloadEnvelope = PayloadEnvelope(result=str(e), filt=replyFilt)
            sendResponse(replyPayloadEnvelope.toVortexMsg())
            try:
                session.rollback()
            except:
                pass
            session.close()
            raise

        # Prefer the reply filt; if not set, combine our accept filt with the
        # filt we were sent

        # Ensure any delegates are playing nice with the result
        if (action in (self.DELETE, self.UPDATE)
                and replyPayloadEnvelope.result is None):
            replyPayloadEnvelope.result = True

        replyPayloadEnvelope.filt = replyFilt
        sendResponse(replyPayloadEnvelope.toVortexMsg())

        # Execute the post process function
        self.postProcess(action, payloadEnvelope.filt, vortexUuid)
        session.commit()
        session.close()

    def _getSession(self):
        if isinstance(self._sessionFunctor, Session):
            return self._sessionFunctor

        return self._sessionFunctor()

    def _getDeclarativeById(self, session, id_):
        qry = session.query(self._Declarative)
        if self._retreiveAll and id_ is None:
            return qry.all()

        try:
            return qry.filter(self._Declarative.id == id_).one()
        except NoResultFound as e:
            return None

    def createDeclarative(self, session, payloadFilt):
        if self._retreiveAll:
            return session.query(self._Declarative).all()

        return [self._Declarative()]

    def _getDeclarativeByTuple(self, session, tuple_):
        T = tuple_.__class__
        return session.query(T).filter(T.id == tuple_.id).one()

    def _create(self, session, payloadFilt) -> PayloadEnvelope:
        tuples = self.createDeclarative(session, payloadFilt)
        payload = Payload(tuples=tuples)
        self._ext.afterCreate(payload.tuples, session, payloadFilt)
        return payload.makePayloadEnvelope()

    def _retrieve(self, session, filtId, payloadFilt, obj=None,
                  **kwargs) -> PayloadEnvelope:
        ph = obj if obj else self._getDeclarativeById(session, filtId)
        payload = Payload()
        payload.tuples = [ph] if ph else []
        self._ext.afterRetrieve(payload.tuples, session, payloadFilt)
        return payload.makePayloadEnvelope()

    def _update(self, session, tuples, payloadFilt) -> PayloadEnvelope:
        self._ext.beforeUpdate(tuples, session, payloadFilt)

        # Add everything first.
        for tupleObj in tuples:
            if tupleObj is None:
                raise Exception("None/null was present in array of tuples")

            # Make sure it's not ''
            if tupleObj.id == '':
                tupleObj.id = None

            if tupleObj.id is None:
                session.add(tupleObj)

        self._ext.middleUpdate(tuples, session, payloadFilt)

        # Now merge with the session
        returnTuples = []
        for tupleObj in tuples:
            if tupleObj.id is None:
                # If this was a create, then we can just return this tuple
                returnTuples.append(tupleObj)
            else:
                # Otherwise use the merge method to perform the update magic,
                # and add the resulting merged tuple to the return list
                returnTuples.append(session.merge(tupleObj))

        self._ext.afterUpdate(returnTuples, session, payloadFilt)

        session.commit()

        self._ext.afterUpdateCommit(returnTuples, session, payloadFilt)

        return Payload(tuples=returnTuples).makePayloadEnvelope(result=True)

    def _delete(self, session, tuples, filtId, payloadFilt) -> PayloadEnvelope:
        self._ext.beforeDelete(tuples, session, payloadFilt)

        if len(tuples):
            phIds = [t.id for t in tuples]
        else:
            phIds = [filtId]

        for phId in phIds:
            ph = self._getDeclarativeById(session, phId)

            try:
                # Try to iterate it
                for item in iter(ph):
                    session.delete(item)
            except TypeError:
                # If it's not an iterator
                if ph is not None:
                    session.delete(ph)

        session.commit()

        returnTuples: List[Tuple] = []
        if self._retreiveAll:
            returnTuples = self.createDeclarative(session, payloadFilt)

        self._ext.afterDeleteCommit(tuples, session, payloadFilt)

        return Payload(tuples=returnTuples).makePayloadEnvelope(result=True)

    def sendModelUpdate(self, objId,
                        vortexUuid=None,
                        session=None,
                        obj=None,
                        **kwargs):
        session = session if session else self._getSession()
        pl = self._retrieve(session, objId, self._payloadFilter, obj=obj)
        pl.filt.update(self._payloadFilter)
        pl.filt[plIdKey] = objId
        VortexFactory.sendVortexMsg(pl.toVortexMsg(), destVortexUuid=vortexUuid)

    def preProcess(self, payload, vortextUuid, **kwargs):
        pass

    def postProcess(self, action, payloadFilt, vortextUuid):
        pass
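
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of exposing an SQLAlchemy declarative over the vortex with
# OrmCrudHandler. "ExampleItem" and "dbSessionCreator" are hypothetical; the
# filter key string just needs to match the one used by the requesting end.

exampleItemCrudHandler = OrmCrudHandler(
    dbSessionCreator,        # a Session, or a callable returning one
    ExampleItem,             # the SQLAlchemy declarative class
    "example.item.crud",     # becomes {"key": "example.item.crud"}
    retreiveAll=False)

# ... later, on shutdown:
# exampleItemCrudHandler.shutdown()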
class PayloadResponse(Deferred):
    """ Payload Response

    This class is used to catch responses from a sent payload.
    If the remote end is going to send back a payload with the same filt,
    this class can be used to capture the response and call its callback.

    If the response is not received within the timeout, the errback is called.

    Here is some example usage.

    ::

        logger = logging.getLogger("Example Payload Response")
        payload = Payload(filt={"rapuiServerEcho":True})
        responseDeferred = PayloadResponse(payload)
        VortexFactory.sendVortexMsg(payload.toVortexMsg())
        logger.info("Payload Sent")

        responseDeferred.addCallbacks(logger.info, logger.error)

    """
    __messageIdKey = "PayloadResponse.messageId"

    PROCESSING = "Processing"
    # NO_ENDPOINT = "No Endpoint"
    FAILED = "Failed"
    SUCCESS = "Success"
    TIMED_OUT = "Timed Out"

    TIMEOUT: float = 30.00

    __SEQ = 1

    def __init__(self, payloadEnvelope: PayloadEnvelope,
                 destVortexName: Optional[str] = None,
                 destVortexUuid: Optional[str] = None,
                 timeout: Optional[float] = None,
                 resultCheck=True,
                 logTimeoutError=True) -> None:
        """ Constructor

        Tag and optionally send a payload.

        The timeout starts as soon as the constructor is called.

        :param payloadEnvelope: The payloadEnvelope to send to the remote end,
            and wait for a response for.
        :param destVortexName: The name of the vortex to send to.
        :param destVortexUuid: The UUID of the vortex to send a payload to.
        :param timeout: The timeout to wait for a response.
        :param resultCheck: Should the response payload.result be checked?
            If it fails, the errback will be called.
        """
        Deferred.__init__(self)

        if not timeout:
            timeout = self.TIMEOUT

        self._resultCheck = resultCheck
        self._logTimeoutError = logTimeoutError

        # uuid4 can have collisions
        self._messageId = str(uuid4()) + str(PayloadResponse.__SEQ)
        PayloadResponse.__SEQ += 1

        payloadEnvelope.filt[self.__messageIdKey] = self._messageId
        self._filt = copy(payloadEnvelope.filt)
        self._destVortexName = destVortexName

        self._status = self.PROCESSING
        self._date = datetime.now(pytz.utc)

        self._endpoint = PayloadEndpoint(self._filt, self._process)

        if destVortexName or destVortexUuid:
            d: Deferred = payloadEnvelope.toVortexMsgDefer()
            d.addCallback(VortexFactory.sendVortexMsg,
                          destVortexName=destVortexName,
                          destVortexUuid=destVortexUuid)
            d.addErrback(self.errback)

        try:
            raise Exception()
        except:
            self._stack = sys.exc_info()[2]

        # noinspection PyTypeChecker
        self.addTimeout(timeout, reactor)
        self.addErrback(self._timedOut)

    @classmethod
    def isResponsePayloadEnvelope(cls, payloadEnvelope: PayloadEnvelope):
        """ Is Response Payload Envelope

        The PayloadResponse tags the payloads, so it expects a unique
        message back.

        :returns: True if this payload has been tagged by a PayloadResponse class
        """
        return cls.__messageIdKey in payloadEnvelope.filt

    @property
    def status(self):
        return self._status

    def _timedOut(self, failure: Failure):
        if self._endpoint:
            self._endpoint.shutdown()
            self._endpoint = None

        if self._logTimeoutError:
            logger.error("Timed out for payload %s", self._filt)

        self._status = self.TIMED_OUT
        return failure

    def _process(self, payloadEnvelope: PayloadEnvelope, vortexName, **kwargs):
        if self._endpoint:
            self._endpoint.shutdown()
            self._endpoint = None

        if self._destVortexName and vortexName != self._destVortexName:
            logger.debug(
                "Received response from a vortex other than the dest vortex, "
                "Expected %s, Received %s", self._destVortexName, vortexName)
            return

        if self.called:
            logger.error("Received response after timeout for %s" % self._filt)
            return

        if self._resultCheck and not payloadEnvelope.result in (None, True):
            self._status = self.FAILED
            self.errback(
                Failure(
                    Exception(payloadEnvelope.result).with_traceback(
                        self._stack)))

        else:
            self._status = self.SUCCESS
            self.callback(payloadEnvelope)

        # Return self, so that PayloadIO knows this is an asynchronous method
        return self
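
# --- Usage sketch (illustrative, not from the source) -----------------------
# With the constructor shown here (which takes a PayloadEnvelope), a
# request/response round trip looks roughly like this. "example.echo" is a
# hypothetical filt key and "otherVortex" a hypothetical vortex name.

payloadEnvelope = PayloadEnvelope(filt={"key": "example.echo"})
responseDeferred = PayloadResponse(payloadEnvelope,
                                   destVortexName="otherVortex",
                                   timeout=10.0)
# PayloadResponse sends the envelope itself when destVortexName/Uuid is given,
# then fires with the reply PayloadEnvelope, or errs back on timeout.
responseDeferred.addCallbacks(logger.info, logger.error)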
class _VortexRPC:
    """ Vortex RPC Wrapper Class

    This wrapper class handles the mechanics of listening for the RPC calls
    (handler) and sending calls (PayloadResponse).

    Under the covers, this is what the class does:

    #.  The caller will be given a Deferred

    #.  The args, kwargs for Tuples and primitive values will be serialised
        into a payload.

    #.  The payload will be sent across the vortex

    #.  The payload will be deserialized and delivered to a PayloadEndpoint
        created for the decorated method.

    #.  The handler will then call the method.

    #.  The result from the method will then be serialised and sent back to
        the calling vortex.

    #.  The deferred will be called with the result from the remote method.

    """
    __registeredFuncNames: Set[str] = set()

    def __init__(self, func,
                 listeningVortexName: str,
                 timeoutSeconds: float,
                 acceptOnlyFromVortex: Optional[Union[str, tuple]],
                 additionalFilt: dict,
                 deferToThread: bool,
                 inlineCallbacks: bool) -> None:
        """
        :param listeningVortexName: If the local vortex name matches this name,
            then a handler will be setup to listen for payloads for this
            RPC method.

        :param timeoutSeconds: The seconds to wait for a response before calling
            the deferreds errback with a TimeoutError

        :param acceptOnlyFromVortex: Accept payloads (calls) only from this
            vortex. The vortex can be str or tuple of str, or None to accept
            from any.

        :param additionalFilt: If specified, the items from this dict will be
            added to the filt that this RPCs handler listens on.

        :param deferToThread: Should the function be called in a thread, or in
            the reactors main loop.

        :param inlineCallbacks: Should the function be wrapped in the twisted
            @inlineCallbacks decorator before it's called?

        """
        self.__func = func
        self.__funcSelf = None
        self.__listeningVortexName = listeningVortexName
        self.__timeoutSeconds = timeoutSeconds
        self.__acceptOnlyFromVortex = acceptOnlyFromVortex
        if isinstance(self.__acceptOnlyFromVortex, str):
            self.__acceptOnlyFromVortex = (self.__acceptOnlyFromVortex, )
        self.__deferToThread = deferToThread
        self.__inlineCallbacks = inlineCallbacks

        self.__funcName = ''
        if func.__globals__["__spec__"]:
            self.__funcName += func.__globals__["__spec__"].name
        self.__funcName += "." + func.__qualname__

        if self.__funcName in self.__registeredFuncNames:
            raise Exception("RPC function name %s is already registered" %
                            self.__funcName)

        self.__registeredFuncNames.add(self.__funcName)

        # Define the FILT
        self._filt = {'_internal': 'vortexRPC', 'key': self.__funcName}

        self._filt.update(additionalFilt)

        # Define the Endpoint

    def start(self, funcSelf=None):
        """ Start

        If this is a class method, then bind the function to the
        object passed in by bindToSelf.

        :param funcSelf: The object to bind the class instance methods self to.

        """
        if VortexFactory.isVortexNameLocal(self.__listeningVortexName):
            self.__ep = PayloadEndpoint(self._filt, self._processCall)
            logger.debug("RPC %s listening", self.__funcName)

        else:
            logger.error(
                "Ignoring request to start listening for RPC %s "
                "as vortex name %s is not local", self.__funcName,
                self.__listeningVortexName)

        self.__funcSelf = funcSelf

        return self

    def shutdown(self):
        """ Shutdown

        Shuts down the RPC PayloadEndpoint

        """
        self.__ep.shutdown()
        self.__func = None
        self.__funcSelf = None

    @inlineCallbacks
    def _processCall(self, payloadEnvelope: PayloadEnvelope, vortexName,
                     sendResponse, *args, **kwargs):
        """ Process

        Process the incoming RPC call payloads.

        """
        # If the sending vortex is local, then ignore it,
        # RPC can not be called locally
        if VortexFactory.isVortexNameLocal(vortexName):
            logger.warning(
                "Received RPC call to %s, from local vortex %s, ignoring it",
                self.__funcName, vortexName)
            return

        # Apply the "allow" logic
        if self.__acceptOnlyFromVortex and vortexName not in self.__acceptOnlyFromVortex:
            logger.debug(
                "Call from non-accepted vortex %s, allowing only from %s",
                vortexName, str(self.__acceptOnlyFromVortex))
            return

        # Get the args tuple
        payload = yield payloadEnvelope.decodePayloadDefer()
        argsTuple = payload.tuples[0]
        assert isinstance(argsTuple, _VortexRPCArgTuple), (
            "argsTuple is not an instance of %s" % _VortexRPCArgTuple)

        logger.debug("Received RPC call for %s", self.__funcName)

        # Call the method and setup the callbacks
        result = yield self.callLocally(argsTuple.args, argsTuple.kwargs)
        yield self._processCallCallback(result, sendResponse,
                                        payloadEnvelope.filt)

    @inlineCallbacks
    def _processCallCallback(self, result, sendResponseCallable, filt):

        payloadEnvelope = yield (Payload(
            filt=filt,
            tuples=[_VortexRPCResultTuple(result=result)
                    ]).makePayloadEnvelopeDefer())

        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()

        yield sendResponseCallable(vortexMsg, RPC_PRIORITY)

    @inlineCallbacks
    def __call__(self, *args, **kwargs):
        """ Call

        """
        yesMainThread()

        try:
            # FAKE Exception so we can raise a better stack trace later
            raise Exception()
        except:
            stack = sys.exc_info()[2]

        logger.debug("Calling RPC for %s", self.__funcName)

        payloadEnvelope = yield (Payload(
            filt=copy(self._filt),
            tuples=[_VortexRPCArgTuple(args=args, kwargs=kwargs)
                    ]).makePayloadEnvelopeDefer(compressionLevel=4))

        pr = PayloadResponse(payloadEnvelope,
                             timeout=self.__timeoutSeconds,
                             resultCheck=False,
                             logTimeoutError=False,
                             destVortexName=self.__listeningVortexName)

        # Delete the payload, we don't need to keep it in memory while we
        # get the result.
        del payloadEnvelope

        pr.addCallback(self._processResponseCallback, stack)
        pr.addErrback(self._processResponseErrback, stack)

        val = yield pr
        return val

    @inlineCallbacks
    def _processResponseCallback(self, payloadEnvelope: PayloadEnvelope, stack):
        """ Process Response Callback

        Convert the PayloadResponse payload to the result from the remotely
        called method.

        """
        if not payloadEnvelope.result in (None, True):
            return Failure(
                Exception(payloadEnvelope.result).with_traceback(stack),
                exc_tb=stack)

        # Get the Result from the payload
        payload = yield payloadEnvelope.decodePayloadDefer()
        resultTuple = payload.tuples[0]
        assert isinstance(resultTuple, _VortexRPCResultTuple), (
            "resultTuple is not an instance of %s" % _VortexRPCResultTuple)

        logger.debug("Received RPC result for %s", self.__funcName)

        # Return the remote result
        return resultTuple.result

    def _processResponseErrback(self, failure, stack):
        """ Process Response Errback

        Convert the PayloadResponse payload to the result from the remotely
        called method.

        """
        if failure.check(TimeoutError):
            logger.error("Received RPC timeout for %s", self.__funcName)

            return Failure(
                Exception("RPC call timed out for %s",
                          self.__funcName).with_traceback(stack),
                exc_tb=stack)

        return failure

    def callLocally(self, args, kwargs):
        """ Call Locally

        This method calls the wrapped function locally, ensuring it returns a
        deferred as its result.

        """
        try:
            if self.__funcSelf:
                args = [self.__funcSelf] + args

            if self.__inlineCallbacks:
                result = inlineCallbacks(self.__func)(*args, **kwargs)

            elif self.__deferToThread:
                result = deferToThread(self.__func, *args, **kwargs)

            else:
                result = self.__func(*args, **kwargs)

        except Exception as e:
            return fail(Failure(e))

        if isinstance(result, Deferred):
            return result

        elif isinstance(result, Failure):
            return fail(result)

        else:
            return succeed(result)
class TupleDataObservableProxyHandler(TupleDataObservableCache):
    __CHECK_PERIOD = 30  # seconds

    def __init__(self, observableName, proxyToVortexName: str,
                 additionalFilt=None, subscriptionsEnabled=True,
                 observerName="default") -> None:
        """ Constructor

        :param observableName: The name of this and the other observable

        :param proxyToVortexName: The vortex dest name to proxy requests to

        :param additionalFilt: Any additional filter keys that are required

        :param subscriptionsEnabled: Should subscriptions be enabled (default)

        :param observerName: We can clash with other observers, so where there
            are multiple observers on the one vortex, they should use
            different names.

        """
        TupleDataObservableCache.__init__(self)

        self._proxyToVortexName = proxyToVortexName
        self._subscriptionsEnabled = subscriptionsEnabled
        self._observerName = observerName
        self._filt = dict(name=observableName,
                          key="tupleDataObservable")
        if additionalFilt:
            self._filt.update(additionalFilt)

        # Create the local observable, this allows local tuple providers.
        # The rest are proxied on to the backend.
        self._localObservableHandler = TupleDataObservableHandler(
            observableName,
            additionalFilt=additionalFilt,
            subscriptionsEnabled=subscriptionsEnabled)
        # Shutdown the local observable's endpoint, we don't want it
        # listening itself
        self._localObservableHandler.shutdown()

        # Finally, setup our endpoint
        self._endpoint = PayloadEndpoint(self._filt, self._process)

        TupleDataObservableCache.start(self)

    def shutdown(self):
        self._endpoint.shutdown()
        TupleDataObservableCache.shutdown(self)

    ## ----- Implement local observable

    def addTupleProvider(self, tupleName, provider: TuplesProviderABC):
        """ Add Tuple Provider

        Adds a tuple provider to the local observable.
        All other requests are proxied on.

        """
        self._localObservableHandler.addTupleProvider(tupleName,
                                                      provider=provider)

    def notifyOfTupleUpdate(self, tupleSelector: TupleSelector) -> None:
        """ Notify of Tuple Update

        Notifies the local observable that tuples have been updated.

        """
        if not self._localObservableHandler.hasTupleProvider(tupleSelector.name):
            raise Exception("Local observable doesn't have tuple provider for %s"
                            " registered, Proxy is : %s" % (
                                tupleSelector.name, self._filt))

        self._localObservableHandler.notifyOfTupleUpdate(tupleSelector)

    ## ----- Implement proxy from here on in

    @inlineCallbacks
    def _process(self, payloadEnvelope: PayloadEnvelope,
                 vortexUuid: str,
                 vortexName: str,
                 sendResponse: SendVortexMsgResponseCallable,
                 **kwargs):

        if vortexName == self._proxyToVortexName:
            yield self._processUpdateFromBackend(payloadEnvelope)

        else:
            yield self._processSubscribeFromFrontend(payloadEnvelope,
                                                     vortexUuid,
                                                     sendResponse)

    def _processSubscribeFromFrontend(self, payloadEnvelope: PayloadEnvelope,
                                      vortexUuid: str,
                                      sendResponse: SendVortexMsgResponseCallable):
        tupleSelector: TupleSelector = payloadEnvelope.filt["tupleSelector"]

        # If the local observable provides this tuple, then use that instead
        if self._localObservableHandler.hasTupleProvider(tupleSelector.name):
            return self._localObservableHandler._process(
                payloadEnvelope=payloadEnvelope,
                vortexUuid=vortexUuid,
                sendResponse=sendResponse)

        # Add support for just getting data, no subscription.
        if payloadEnvelope.filt.get("unsubscribe"):
            return self._handleUnsubscribe(tupleSelector, vortexUuid)

        elif payloadEnvelope.filt.get("subscribe", True) and self._subscriptionsEnabled:
            return self._handleSubscribe(payloadEnvelope, tupleSelector,
                                         sendResponse, vortexUuid)

        else:
            return self._handlePoll(payloadEnvelope, tupleSelector, sendResponse)

    def _handleUnsubscribe(self, tupleSelector: TupleSelector,
                           vortexUuid: str):

        if not self._hasTupleSelector(tupleSelector):
            return

        cache = self._getCache(tupleSelector)
        try:
            cache.vortexUuids.remove(vortexUuid)
        except KeyError:
            pass

    def _handleSubscribe(self, payloadEnvelope: PayloadEnvelope,
                         tupleSelector: TupleSelector,
                         sendResponse: SendVortexMsgResponseCallable,
                         vortexUuid: str):
        # Add support for just getting data, no subscription.
        cache = self._getCache(tupleSelector)
        if cache and cache.lastServerPayloadDate is not None and cache.cacheEnabled:
            respPayloadEnvelope = PayloadEnvelope(
                filt=payloadEnvelope.filt,
                encodedPayload=cache.encodedPayload,
                date=cache.lastServerPayloadDate)

            d = respPayloadEnvelope.toVortexMsgDefer()
            d.addCallback(sendResponse)
            d.addErrback(vortexLogFailure, logger, consumeError=True)

        elif cache:
            self._sendRequestToServer(payloadEnvelope)

        else:
            cache = self._makeCache(tupleSelector)
            self._sendRequestToServer(payloadEnvelope)

        cache.vortexUuids.add(vortexUuid)

        # Allow the cache to be disabled
        cache.cacheEnabled = (
            cache.cacheEnabled
            and not payloadEnvelope.filt.get("disableCache", False)
        )

    def _sendRequestToServer(self, payloadEnvelope: PayloadEnvelope):
        payloadEnvelope.filt["observerName"] = self._observerName
        d = VortexFactory.sendVortexMsg(vortexMsgs=payloadEnvelope.toVortexMsg(),
                                        destVortexName=self._proxyToVortexName)
        d.addErrback(vortexLogFailure, logger, consumeError=True)

    def _sendUnsubscribeToServer(self, tupleSelector: TupleSelector):
        payloadEnvelope = PayloadEnvelope()
        payloadEnvelope.filt["tupleSelector"] = tupleSelector
        payloadEnvelope.filt["unsubscribe"] = True
        self._sendRequestToServer(payloadEnvelope)

    def _handlePoll(self, payloadEnvelope: PayloadEnvelope,
                    tupleSelector: TupleSelector,
                    sendResponse: SendVortexMsgResponseCallable):

        useCache = payloadEnvelope.filt.get('useCache', True)

        # Keep a copy of the incoming filt, in case they are using PayloadResponse
        responseFilt = copy(payloadEnvelope.filt)

        # Restore the original payload filt (PayloadResponse) and send it back
        def reply(payload):
            payload.filt = responseFilt
            d = payload.toVortexMsgDefer()
            d.addCallback(sendResponse)
            d.addErrback(vortexLogFailure, logger, consumeError=True)
            # logger.debug("Received response from observable")

        if useCache:
            cache = self._getCache(tupleSelector)
            if cache and cache.lastServerPayloadDate is not None and cache.cacheEnabled:
                payloadEnvelope.encodedPayload = cache.encodedPayload
                payloadEnvelope.date = cache.lastServerPayloadDate
                reply(payloadEnvelope)
                return

        # Track the response, log an error if it fails
        # 5 Seconds is long enough
        pr = PayloadResponse(
            payloadEnvelope,
            timeout=PayloadResponse.TIMEOUT - 5,  # 5 seconds less
            logTimeoutError=False
        )

        pr.addErrback(self._handlePrFailure, tupleSelector)
        pr.addErrback(vortexLogFailure, logger, consumeError=True)
        pr.addCallback(reply)

        self._sendRequestToServer(payloadEnvelope)

    def _handlePrFailure(self, f: Failure, tupleSelector):
        if f.check(TimeoutError):
            logger.error(
                "Received no response from\nobservable %s\ntuple selector %s",
                self._filt,
                tupleSelector.toJsonStr()
            )
        else:
            logger.error(
                "Unexpected error, %s\nobservable %s\ntuple selector %s",
                f,
                self._filt,
                tupleSelector.toJsonStr()
            )

    @deferToThreadWrapWithLogger(logger)
    def _processUpdateFromBackend(self, payloadEnvelope: PayloadEnvelope):
        tupleSelector: TupleSelector = payloadEnvelope.filt["tupleSelector"]

        if not self._hasTupleSelector(tupleSelector):
            return

        cache, requiredUpdate = self._updateCache(payloadEnvelope)
        if not requiredUpdate:
            return

        # Get / update the list of observing UUIDs
        observingUuids = cache.vortexUuids & set(
            VortexFactory.getRemoteVortexUuids())

        if not observingUuids:
            return

        # Create the vortexMsg
        vortexMsg = payloadEnvelope.toVortexMsg()

        # Send the vortex messages
        for vortexUuid in observingUuids:
            d = VortexFactory.sendVortexMsg(vortexMsgs=vortexMsg,
                                            destVortexUuid=vortexUuid)
            d.addErrback(vortexLogFailure, logger, consumeError=True)
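
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of a proxy observable sitting on a client-facing service.
# "exampleObservable" and "backendVortexName" are hypothetical names.

tupleObservable = TupleDataObservableProxyHandler(
    observableName="exampleObservable",
    proxyToVortexName="backendVortexName")

# Tuples provided locally are answered here; everything else is proxied
# through to the backend vortex and cached on the way back.
# tupleObservable.addTupleProvider(ExampleTuple.tupleName(),
#                                  ExampleTupleProvider())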
class TraceConfigCacheController:
    """ TraceConfig Cache Controller

    The TraceConfig cache controller stores all the chunks in memory,
    allowing fast access from the mobile and desktop devices.

    """

    LOAD_CHUNK = 32

    def __init__(self, clientId: str,
                 tupleObservable: TupleDataObservableProxyHandler):
        self._clientId = clientId
        self._tupleObservable = tupleObservable

        #: This stores the cache of segment data for the clients
        self._cache: Dict[str, Dict[str, GraphDbTraceConfigTuple]] = defaultdict(dict)

        self._endpoint = PayloadEndpoint(clientTraceConfigUpdateFromServerFilt,
                                         self._processTraceConfigPayload)

    @inlineCallbacks
    def start(self):
        yield self.reloadCache()

    def shutdown(self):
        self._tupleObservable = None

        self._endpoint.shutdown()
        self._endpoint = None

        self._cache = defaultdict(dict)

    @inlineCallbacks
    def reloadCache(self):
        self._cache = defaultdict(dict)

        offset = 0
        while True:
            logger.info("Loading TraceConfig %s to %s" %
                        (offset, offset + self.LOAD_CHUNK))
            traceConfigTuples: List[GraphDbTraceConfigTuple] = (
                yield TraceConfigLoadRpc.loadTraceConfigs(offset, self.LOAD_CHUNK))
            if not traceConfigTuples:
                break

            traceConfigsByModelSetKey = defaultdict(list)

            for trace in traceConfigTuples:
                traceConfigsByModelSetKey[trace.modelSetKey].append(trace)

            del traceConfigTuples

            for modelSetKey, traceConfigTuples in traceConfigsByModelSetKey.items():
                self._loadTraceConfigIntoCache(modelSetKey, traceConfigTuples)

            offset += self.LOAD_CHUNK

    @inlineCallbacks
    def _processTraceConfigPayload(self, payloadEnvelope: PayloadEnvelope,
                                   **kwargs):
        payload = yield payloadEnvelope.decodePayloadDefer()

        dataDict = payload.tuples[0]

        if payload.filt.get(plDeleteKey):
            modelSetKey = dataDict["modelSetKey"]
            traceConfigKeys = dataDict["traceConfigKeys"]
            self._removeTraceConfigFromCache(modelSetKey, traceConfigKeys)
            return

        modelSetKey = dataDict["modelSetKey"]
        traceConfigTuples: List[GraphDbTraceConfigTuple] = dataDict["tuples"]

        self._loadTraceConfigIntoCache(modelSetKey, traceConfigTuples)

    def _removeTraceConfigFromCache(self, modelSetKey: str,
                                    traceConfigKeys: List[str]):
        subCache = self._cache[modelSetKey]

        logger.debug("Received TraceConfig deletes from server, %s %s removed",
                     modelSetKey, len(traceConfigKeys))

        for traceConfigKey in traceConfigKeys:
            if traceConfigKey in subCache:
                subCache.pop(traceConfigKey)

        self._tupleObservable.notifyOfTupleUpdate(
            TupleSelector(GraphDbTraceConfigTuple.tupleType(),
                          dict(modelSetKey=modelSetKey)))

    def _loadTraceConfigIntoCache(self, modelSetKey: str,
                                  traceConfigTuples: List[GraphDbTraceConfigTuple],
                                  deletedTraceConfigKeys: Set[str] = set()):
        subCache = self._cache[modelSetKey]

        traceKeysUpdated: Set[str] = {
            traceConfig.key for traceConfig in traceConfigTuples
        }

        deletedTraceConfigKeys -= traceKeysUpdated

        for traceConfig in traceConfigTuples:
            subCache[traceConfig.key] = traceConfig

        for traceConfigKey in deletedTraceConfigKeys:
            if traceConfigKey in subCache:
                subCache.pop(traceConfigKey)

        logger.debug(
            "Received TraceConfig updates from server, "
            "%s %s removed, %s added/updated",
            modelSetKey, len(deletedTraceConfigKeys), len(traceKeysUpdated))

        self._tupleObservable.notifyOfTupleUpdate(
            TupleSelector(GraphDbTraceConfigTuple.tupleType(),
                          dict(modelSetKey=modelSetKey)))

    def traceConfigTuple(self, modelSetKey: str,
                         traceConfigKey: str) -> GraphDbTraceConfigTuple:
        return self._cache[modelSetKey][traceConfigKey]

    def traceConfigTuples(self, modelSetKey: Optional[str]
                          ) -> List[GraphDbTraceConfigTuple]:
        if modelSetKey:
            return list(self._cache[modelSetKey].values())

        configs = []
        for configsByKey in self._cache.values():
            configs += configsByKey.values()

        return configs
class TupleActionProcessorProxy:
    """ Tuple Action Processor Proxy

    This class proxies the TupleActions onto another destination,
    giving the ability to pass through one service into another.

    EG, from a client facing python service to a server backend.

    """

    def __init__(self, tupleActionProcessorName,
                 proxyToVortexName: str,
                 additionalFilt=None,
                 acceptOnlyFromVortex: Optional[Union[str, tuple]] = None) -> None:
        """ Constructor

        :param tupleActionProcessorName: The name of this and the other
            action handler
        :param proxyToVortexName: The vortex dest name to proxy requests to
        :param additionalFilt: Any additional filter keys that are required

        """
        self._proxyToVortexName = proxyToVortexName

        self._delegateProcessor = TupleActionProcessor(
            tupleActionProcessorName=tupleActionProcessorName,
            usedForProxy__=True)

        self._filt = dict(name=tupleActionProcessorName,
                          key="tupleActionProcessorName")

        if additionalFilt:
            self._filt.update(additionalFilt)

        self._endpoint = PayloadEndpoint(
            self._filt, self._process,
            acceptOnlyFromVortex=acceptOnlyFromVortex)

    def shutdown(self):
        self._endpoint.shutdown()

    def setDelegate(self, tupleName: str,
                    processor: TupleActionProcessorDelegateABC):
        """ Add Tuple Action Processor Delegate

        :param tupleName: The tuple name to process actions for.
        :param processor: The processor to use for processing this tuple name.

        """
        self._delegateProcessor.setDelegate(tupleName, processor)

    @inlineCallbacks
    def _process(self, payloadEnvelope: PayloadEnvelope,
                 vortexName: str,
                 sendResponse: SendVortexMsgResponseCallable,
                 **kwargs) -> None:

        # Ignore responses from the backend, these are handled by PayloadResponse
        if vortexName == self._proxyToVortexName:
            return

        # Shortcut the logic, so that we don't decode the payload unless
        # we need to.
        if not self._delegateProcessor.delegateCount:
            yield self._processForProxy(payloadEnvelope, vortexName, sendResponse)
            return

        # If we have local processors, then work out if this tupleAction is
        # meant for the local processor.
        payload = yield payloadEnvelope.decodePayloadDefer()

        assert len(payload.tuples) == 1, (
            "TupleActionProcessor:%s Expected 1 tuples, received %s" %
            (self._tupleActionProcessorName, len(payload.tuples)))

        tupleAction = payload.tuples[0]

        if self._delegateProcessor.hasDelegate(tupleAction.tupleName()):
            self._delegateProcessor._processTupleAction(
                payloadEnvelope.filt, sendResponse, tupleAction)
            return

        # Else, just send it on to the delegate we're proxying for (the backend)
        yield self._processForProxy(payloadEnvelope, vortexName, sendResponse)

    @inlineCallbacks
    def _processForProxy(self, payloadEnvelope: PayloadEnvelope,
                         vortexName: str,
                         sendResponse: SendVortexMsgResponseCallable,
                         **kwargs):
        # Keep a copy of the incoming filt, in case they are using PayloadResponse
        responseFilt = copy(payloadEnvelope.filt)

        # Track the response, log an error if it fails
        # 5 Seconds is long enough.
        # VortexJS defaults to 10s, so we have some room for round trip time.
        pr = PayloadResponse(
            payloadEnvelope,
            timeout=PayloadResponse.TIMEOUT - 5,  # 5 seconds less
            resultCheck=False,
            logTimeoutError=False)

        # This is not a lambda, so that it can have a breakpoint
        def reply(payloadEnvelope: PayloadEnvelope):
            payloadEnvelope.filt = responseFilt
            d: Deferred = payloadEnvelope.toVortexMsgDefer()
            d.addCallback(sendResponse)
            return d

        pr.addCallback(reply)

        pr.addCallback(
            lambda _: logger.debug("Received action response from server"))
        pr.addErrback(self.__handlePrFailure, payloadEnvelope, sendResponse)

        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()

        try:
            yield VortexFactory.sendVortexMsg(
                vortexMsgs=vortexMsg, destVortexName=self._proxyToVortexName)

        except Exception as e:
            logger.exception(e)

    @inlineCallbacks
    def __handlePrFailure(self, f: Failure, payloadEnvelope: PayloadEnvelope,
                          sendResponse: SendVortexMsgResponseCallable):
        payload = yield payloadEnvelope.decodePayloadDefer()
        action = payload.tuples[0]
        if f.check(TimeoutError):
            logger.error("Received no response from\nprocessor %s\naction %s",
                         self._filt, action)

        else:
            logger.error("Unexpected error, %s\nprocessor %s\naction %s",
                         f, self._filt, action)
            vortexLogFailure(f, logger)

        vortexMsg = yield PayloadEnvelope(
            filt=payloadEnvelope.filt,
            result=str(f.value)).toVortexMsgDefer()

        sendResponse(vortexMsg)
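
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of proxying actions from a client-facing service to the
# backend. "exampleActionProcessor" and "backendVortexName" are hypothetical.

actionProxy = TupleActionProcessorProxy(
    tupleActionProcessorName="exampleActionProcessor",
    proxyToVortexName="backendVortexName")

# Actions with a locally registered delegate are handled here; all other
# actions are forwarded to the backend and the response is relayed back.
# actionProxy.setDelegate("LocalOnlyAction", LocalOnlyDelegate())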
class TupleDataObservableHandler:
    def __init__(
        self,
        observableName,
        additionalFilt=None,
        subscriptionsEnabled=True,
        acceptOnlyFromVortex: Optional[Union[str, tuple]] = None,
    ):
        """Constructor

        :param observableName: The name of this observable

        :param additionalFilt: Any additional filter keys that are required

        :param subscriptionsEnabled: Should subscriptions be enabled (default)

        :param acceptOnlyFromVortex: Accept requests only from this vortex.
            The vortex can be str or tuple of str, or None to accept from any.

        """
        self._observableName = observableName
        self._subscriptionsEnabled = subscriptionsEnabled
        self._filt = dict(name=observableName, key="tupleDataObservable")
        if additionalFilt:
            self._filt.update(additionalFilt)

        self._endpoint = PayloadEndpoint(
            self._filt, self._process,
            acceptOnlyFromVortex=acceptOnlyFromVortex)

        self._observerDataByTupleSelector = {}

        self._tupleProvidersByTupleName = {}
        self._tupleSelectorUpdateMappers = []

    def addTupleProvider(self, tupleName, provider: TuplesProviderABC):
        """Add Tuple Provider"""
        assert not tupleName in self._tupleProvidersByTupleName, (
            "Observable:%s, Tuple name %s is already registered" % (
                self._observableName,
                tupleName,
            ))

        assert isinstance(provider, TuplesProviderABC), (
            "Observable:%s, provider must be an instance of TuplesProviderABC"
            % self._observableName)

        self._tupleProvidersByTupleName[tupleName] = provider

    def addTupleSelectorUpdateMapper(self, mapper: TupleSelectorUpdateMapperABC):
        """Add Tuple Selector Update Mapper

        This mapper will be called every time a tuple selector is notified
        of an update.

        """
        self._tupleSelectorUpdateMappers.append(mapper)

    def hasTupleProvider(self, tupleName: str):
        return tupleName in self._tupleProvidersByTupleName

    def hasTupleSubscribers(self, tupleSelector: TupleSelector):
        return tupleSelector.toJsonStr() in self._observerDataByTupleSelector

    def shutdown(self):
        self._endpoint.shutdown()
        self._tupleSelectorUpdateMappers = []
        self._tupleProvidersByTupleName = {}
        self._observerDataByTupleSelector = {}

    @inlineCallbacks
    def _createVortexMsg(self, filt, tupleSelector: TupleSelector) -> Deferred:
        tupleProvider = self._tupleProvidersByTupleName.get(tupleSelector.name)
        assert tupleProvider, (
            "Observable:%s, No providers registered for tupleName %s" % (
                self._observableName,
                tupleSelector.name,
            ))

        try:
            vortexMsg = yield self._customMaybeDeferred(
                tupleProvider.makeVortexMsg, filt, tupleSelector)
        except Exception as e:
            logger.exception(e)
            raise

        if not vortexMsg:
            msg = f"TupleDataProvider did not return VortexPY message {tupleSelector}"
            logger.error(msg)
            raise Exception(msg)

        return vortexMsg

    @inlineCallbacks
    def _process(
        self,
        payloadEnvelope: PayloadEnvelope,
        vortexUuid: str,
        sendResponse: SendVortexMsgResponseCallable,
        **kwargs,
    ):
        tupleSelector = payloadEnvelope.filt["tupleSelector"]
        tsStr = tupleSelector.toJsonStr()

        observerDetails = _ObserverDetails(
            vortexUuid, payloadEnvelope.filt.get("observerName"))

        # Handle unsubscribe
        if payloadEnvelope.filt.get("unsubscribe"):
            if tsStr in self._observerDataByTupleSelector:
                try:
                    self._observerDataByTupleSelector[tsStr].observers.remove(
                        observerDetails)
                except KeyError:
                    pass

                if not self._observerDataByTupleSelector[tsStr].observers:
                    del self._observerDataByTupleSelector[tsStr]

            return

        # Add support for just getting data, no subscription.
        if self._subscriptionsEnabled and payloadEnvelope.filt.get(
                "subscribe", True):
            if tsStr not in self._observerDataByTupleSelector:
                self._observerDataByTupleSelector[tsStr] = _ObserverData(
                    tupleSelector)

            self._observerDataByTupleSelector[tsStr].observers.add(
                observerDetails)

        vortexMsg = yield self._createVortexMsg(payloadEnvelope.filt,
                                                tupleSelector)
        try:
            yield sendResponse(vortexMsg)
        except Exception as e:
            logger.exception(e)

    def _getMappedTupleSelectors(
            self, tupleSelector: TupleSelector) -> List[TupleSelector]:
        # Get all tuple selectors
        allTupleSelectors = [
            data.tupleSelector
            for data in self._observerDataByTupleSelector.values()
        ]

        # Create a dict so we end up with only unique ones
        tupleSelectorByStr = {tupleSelector.toJsonStr(): tupleSelector}

        # Run through the mappers
        for mapper in self._tupleSelectorUpdateMappers:
            mappedSelectors = mapper.mapTupleSelector(tupleSelector,
                                                      allTupleSelectors)
            if mappedSelectors:
                tupleSelectorByStr.update(
                    {ts.toJsonStr(): ts for ts in mappedSelectors})

        # Return a list of tuple selectors
        return list(tupleSelectorByStr.values())

    def notifyOfTupleUpdate(self, tupleSelector: TupleSelector) -> None:
        """Notify Of Tuple Update

        This method tells the observable that an update has occurred and
        that it should send new data to its observers.

        Tuple selectors should be identical to the data being observed.

        :param tupleSelector: A tuple selector that describes the scope of
            the update.

        :returns: None

        """
        tupleSelectors = self._getMappedTupleSelectors(tupleSelector)
        reactor.callLater(0, self._notifyOfTupleUpdateInMain, tupleSelectors)

    def notifyOfTupleUpdateForTuple(self, tupleName: str) -> None:
        """Notify of Tuple Update for Tuple

        Like the above notification method, this method will acquire new data
        and send it to observers.

        .. note:: Calling this will not trigger the TupleSelector Update
            Mappers as this is an earlier solution. For all new code, use
            notifyOfTupleUpdate() and define mappers.

        """
        tupleSelectors = []
        for data in self._observerDataByTupleSelector.values():
            if data.tupleSelector.name == tupleName:
                tupleSelectors.append(data.tupleSelector)

        reactor.callLater(0, self._notifyOfTupleUpdateInMain, tupleSelectors)

    @inlineCallbacks
    def _notifyOfTupleUpdateInMain(self, tupleSelectors: List[TupleSelector]):
        for tupleSelector in tupleSelectors:
            yield self._notifyOfTupleUpdateInMainOne(tupleSelector)

    @inlineCallbacks
    def _notifyOfTupleUpdateInMainOne(self, tupleSelector: TupleSelector):
        tsStr = tupleSelector.toJsonStr()
        if tsStr not in self._observerDataByTupleSelector:
            return

        # Filter out the offline observables
        onlineUuids = set(VortexFactory.getRemoteVortexUuids())
        observers = self._observerDataByTupleSelector[tsStr].observers
        for od in list(observers):
            if od.vortexUuid not in onlineUuids:
                observers.remove(od)

        # Get / update the list of observing UUIDs
        if not observers:
            del self._observerDataByTupleSelector[tsStr]
            return

        # Create the vortexMsg
        filt = dict(tupleSelector=tupleSelector)
        filt.update(self._filt)
        vortexMsg = yield self._createVortexMsg(filt, tupleSelector)

        # We can have multiple Observable clients on the one vortex, so make
        # sure we only send one message for these.
        destVortexUuids = set([od.vortexUuid for od in observers])

        # Send the vortex messages
        for destVortexUuid in destVortexUuids:
            d = VortexFactory.sendVortexMsg(vortexMsgs=vortexMsg,
                                            destVortexUuid=destVortexUuid)
            d.addErrback(vortexLogFailure, logger)

    def _customMaybeDeferred(self, f, *args, **kw):
        try:
            result = f(*args, **kw)
        except Exception as e:
            logger.error(
                "TupleDataObservableHandler:%s TupleProvider failed",
                self._observableName,
            )
            logger.exception(e)
            return fail(failure.Failure(e))

        if isinstance(result, Deferred):
            return result
        elif isinstance(result, failure.Failure):
            return fail(result)
        else:
            return succeed(result)
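
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of an observable with one tuple provider. The
# "ExampleTuplesProvider" class, the "exampleObservable" name and the
# "example.ExampleTuple" tuple name are hypothetical; TuplesProviderABC,
# TupleSelector and Payload come from the surrounding code.

class ExampleTuplesProvider(TuplesProviderABC):
    def makeVortexMsg(self, filt: dict, tupleSelector: TupleSelector) -> bytes:
        tuples = []  # load the tuples selected by tupleSelector here
        # A plain value is fine; _customMaybeDeferred wraps it in a Deferred
        return Payload(filt=filt, tuples=tuples) \
            .makePayloadEnvelope().toVortexMsg()

observable = TupleDataObservableHandler("exampleObservable")
observable.addTupleProvider("example.ExampleTuple", ExampleTuplesProvider())

# When the underlying data changes:
# observable.notifyOfTupleUpdate(
#     TupleSelector("example.ExampleTuple", {"key": "someKey"}))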
class TupleDataObserverClient(TupleDataObservableCache):
    def __init__(self, destVortexName, observableName,
                 additionalFilt=None, observerName="default"):
        """ Constructor

        :param observableName: The name of this observable

        :param additionalFilt: Any additional filter keys that are required

        :param destVortexName: The dest vortex name to send the payloads to

        """
        TupleDataObservableCache.__init__(self)

        self._destVortexName = destVortexName
        self._observableName = observableName
        self._observerName = observerName

        self._sendFilt = dict(name=observableName,
                              observerName=observerName,
                              key="tupleDataObservable")

        self._listenFilt = dict(name=observableName,
                                key="tupleDataObservable")

        if additionalFilt:
            self._sendFilt.update(additionalFilt)
            self._listenFilt.update(additionalFilt)

        self._endpoint = PayloadEndpoint(
            self._listenFilt,
            self._receivePayload,
            acceptOnlyFromVortex=destVortexName
        )

        # There are no online checks for the vortex
        # isOnlineSub = statusService.isOnline
        #     .filter(online => online === true)
        #     .subscribe(online => self.vortexOnlineChanged())
        #
        # self.onDestroyEvent.subscribe(() => isOnlineSub.unsubscribe())

        VortexFactory \
            .subscribeToVortexStatusChange(destVortexName) \
            .filter(lambda online: online is True) \
            .subscribe(self._vortexOnlineChanged)

        TupleDataObservableCache.start(self)

    def shutdown(self):
        self._endpoint.shutdown()
        TupleDataObservableCache.shutdown(self)

    def pollForTuples(
        self, tupleSelector: TupleSelector, logTimeoutError: bool = True
    ) -> Deferred:
        startFilt = copy(self._sendFilt)
        startFilt.update({"subscribe": False, "tupleSelector": tupleSelector})

        def updateCacheCallback(
            payloadEnvelope: PayloadEnvelope,
        ) -> PayloadEnvelope:
            cache, _ = self._updateCache(payloadEnvelope)
            return payloadEnvelope

        pr = PayloadResponse(
            payloadEnvelope=PayloadEnvelope(startFilt),
            destVortexName=self._destVortexName,
            logTimeoutError=logTimeoutError,
        )
        pr.addCallback(updateCacheCallback)
        pr.addCallback(
            lambda payloadEnvelope: payloadEnvelope.decodePayloadDefer()
        )
        pr.addCallback(lambda payload: payload.tuples)
        return pr

    def subscribeToTupleSelector(self, tupleSelector: TupleSelector) -> Subject:
        cache = self._makeCache(tupleSelector)
        self._tellServerWeWantData([tupleSelector])
        return cache.subject

    def _vortexOnlineChanged(self, *args) -> None:
        self._tellServerWeWantData(self._tupleSelectors())

    @inlineCallbacks
    def _receivePayload(self, payloadEnvelope: PayloadEnvelope, **kwargs):
        # If this message is for a specific observer, and it's not us,
        # then discard it.
        filtObserverName = payloadEnvelope.filt.get("observerName")
        if filtObserverName is not None and filtObserverName != self._observerName:
            return

        # If this message is an error response, then don't process it.
        if payloadEnvelope.result not in (None, True):
            logger.error("Vortex responded with error : %s" %
                         payloadEnvelope.result)
            logger.error(str(payloadEnvelope.filt))
            return

        tupleSelector: TupleSelector = payloadEnvelope.filt["tupleSelector"]

        if not self._hasTupleSelector(tupleSelector):
            return

        cache, requiredUpdate = self._updateCache(payloadEnvelope)
        if not requiredUpdate:
            return

        payload = yield Payload().fromEncodedPayloadDefer(cache.encodedPayload)
        cache.subject.on_next(payload.tuples)

    def _tellServerWeWantData(self, tupleSelectors: List[TupleSelector]):
        for tupleSelector in tupleSelectors:
            self._sendRequestToServer(
                PayloadEnvelope({"subscribe": True,
                                 "tupleSelector": tupleSelector})
            )

    def _sendRequestToServer(self, payload):
        payload.filt.update(self._sendFilt)
        d = VortexFactory.sendVortexMsg(vortexMsgs=payload.toVortexMsg(),
                                        destVortexName=self._destVortexName)
        d.addErrback(vortexLogFailure, logger, consumeError=True)

    def _sendUnsubscribeToServer(self, tupleSelector: TupleSelector):
        payload = PayloadEnvelope()
        payload.filt["tupleSelector"] = tupleSelector
        payload.filt["unsubscribe"] = True
        self._sendRequestToServer(payload)
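
# --- Usage sketch (illustrative, not from the source) -----------------------
# A minimal sketch of observing tuples from another vortex. The vortex,
# observable and tuple names are hypothetical; the returned subject is an
# RxPY-style Subject provided by TupleDataObservableCache.

observerClient = TupleDataObserverClient(
    destVortexName="backendVortexName",
    observableName="exampleObservable")

subject = observerClient.subscribeToTupleSelector(
    TupleSelector("example.ExampleTuple", {"key": "someKey"}))

# on_next fires with the list of tuples each time the server pushes an update
subject.subscribe(lambda tuples: logger.debug("Received %s tuples", len(tuples)))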