class NgasConnection():
    '''
    A simple API for NGAS
    '''

    def __init__(self):
        self.dbName = "ngas"
        self.dbcon = DbConnection(baseUrl)

    def __readBinaryFileIntoString(self, filename):
        with open(filename, mode='rb') as file:   # b is important -> binary
            fileContent = file.read()
        s = base64.b64encode(fileContent).decode()
        return s

    def __writeStringAsBinaryFile(self, s, filename):
        b = base64.b64decode(s)
        with open(filename, mode='wb') as file:
            file.write(b)

    def put(self, pathname):
        s = self.__readBinaryFileIntoString(pathname)
        basename = os.path.basename(pathname)
        file = {}
        # file['filename'] = basename
        file['encodedContents'] = s
        file['writeTimestamp'] = dbdrwutils.nowISO()
        # print( ">>> attempting save(): dbName: %s, basename: %s, file: %s" % (self.dbName, basename, file) )
        retcode, msg = self.dbcon.save(self.dbName, basename, file)
        # print( ">>> ngas retcode:", retcode, "msg:", msg )
        return 0 if (retcode == 201) else retcode

    def check(self, id):
        "Return true if we have a file with the given ID, false otherwise"
        retcode, files = self.dbcon.findOne(self.dbName, id)
        if retcode != 200:
            raise RuntimeError("NGAS: error %d: %s" % (retcode, files))
        return len(files) > 0
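# Usage sketch (not from the original sources): how NgasConnection might be driven.
# It assumes the module-level 'baseUrl' points at a running CouchDB instance and that
# 'products/image.fits' is a hypothetical local file.
def _ngas_example():
    ngas = NgasConnection()
    retcode = ngas.put("products/image.fits")       # stored under its basename
    if retcode == 0 and ngas.check("image.fits"):
        print("image.fits is now available in NGAS")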
if retcode != 200:
    raise RuntimeError("find: %s: error %d: %s" % (dbName, retcode, ousStatuses))

# For each OUS status entity we found, see if all data was actually replicated here
if len(ousStatuses) > 0:
    startTime = time.time()   # Reset startTime for incremental waiting
    # Oldest first (compareByTimestamp is defined elsewhere; see the sketch after this listing)
    ousStatuses = sorted(ousStatuses, key=compareByTimestamp)
    for ous in ousStatuses:
        ousUID = ous['entityId']
        # ts = ous['timestamp']
        # print( ">>> found", ousUID, ts )

        # Retrieve the list of products from the delivery status
        encodedUID = dbdrwutils.encode(ousUID)
        dbName = 'delivery-status'
        retcode, delStatus = dbcon.findOne(dbName, encodedUID)
        if retcode != 200:
            raise RuntimeError("find: %s: error %d: %s" % (dbName, retcode, delStatus))

        # See if all those data products were replicated here
        dataProducts = delStatus['dataProducts']
        allReplicated = True
        for dataProduct in dataProducts:
            # print( ">>> found", dataProduct )
            if not ngas.check(dataProduct):
                allReplicated = False
                break

        # YES, all those data products were replicated -- this OUS can
        # be delivered
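# The sorted() call above relies on a compareByTimestamp helper that is not shown in
# this listing; a minimal sketch, assuming it is a key function and that every OUS
# status record carries an ISO-8601 'timestamp' field (so lexicographic order matches
# chronological order):
def compareByTimestamp(ous):
    return ous['timestamp']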
class XTSS():
    def __init__(self):
        self._baseUrl = "http://localhost:5984"   # CouchDB
        self._dbcon = DbConnection(self._baseUrl)
        self._dbName = "status-entities"
        self._broker = RabbitMqMessageBroker()
        self._subscriber = Subscriber(self._broker, 'xtss.transitions', 'xtss')

    def start(self):
        executor = Executor('localhost', 'msgq', 'xtss', self.xtss)
        print(" [x] Awaiting RPC requests to 'xtss'")
        executor.run()

    def __nowISO(self):
        return datetime.datetime.utcnow().isoformat()[:-3]

    def findOUSStatus(self, ousUID):
        "Find an OUSStatus with the given ID, raise an error if none are found"
        retcode, ousStatus = self._dbcon.findOne(self._dbName, ousUID)
        if retcode == 404:
            raise RuntimeError("OUS not found: %s" % ousUID)
        return ousStatus

    def setField(self, ousUID, fieldName, fieldValue):
        "Set the value of a field of an OUSStatus, update its timestamp"
        ousStatus = self.findOUSStatus(ousUID)
        ousStatus[fieldName] = fieldValue
        ousStatus['timestamp'] = self.__nowISO()
        retcode, msg = self._dbcon.save(self._dbName, ousUID, ousStatus)
        return retcode

    def setState(self, ousUID, state):
        "Set the state of an OUSStatus"
        return self.setField(ousUID, 'state', state)

    def setSubstate(self, ousUID, substate):
        "Set the substate of an OUSStatus"
        return self.setField(ousUID, 'substate', substate)

    def setExecutive(self, ousUID, executive):
        "Set the Executive of an OUSStatus"
        return self.setFlag(ousUID, 'PL_PROCESSING_EXECUTIVE', executive)

    def clearExecutive(self, ousUID):
        "Clear the Executive of an OUSStatus"
        return self.clearFlag(ousUID, 'PL_PROCESSING_EXECUTIVE')

    def setFlag(self, ousUID, name, value):
        "Set an OUSStatus flag"
        ousStatus = self.findOUSStatus(ousUID)
        if 'flags' in ousStatus:
            flags = ousStatus['flags']
        else:
            flags = {}
        flags[name] = value
        return self.setField(ousUID, 'flags', flags)

    def clearFlag(self, ousUID, name):
        "Clear an OUSStatus flag"
        ousStatus = self.findOUSStatus(ousUID)
        if 'flags' in ousStatus:
            flags = ousStatus['flags']
        else:
            flags = {}
        if name in flags:
            del flags[name]
        return self.setField(ousUID, 'flags', flags)

    def findByStateSubstate(self, state, substate):
        """
        Returns a return code and, if all was well and the code is 200,
        all OUSs with the given state and substate; note substate is
        interpreted as a regexp
        """
        selector = {
            "selector": {
                "state": state,
                "substate": {"$regex": substate}
            }
        }
        return self._dbcon.find(self._dbName, selector)
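# Usage sketch (not from the original sources): driving the XTSS status service
# directly, assuming CouchDB and the message broker are reachable. The OUS UID is
# the sample one used by launcher.py below; the executive and substate pattern are
# illustrative values.
def _xtss_example():
    xtss = XTSS()
    ousUID = "uid://X1/X1/Xb2"
    xtss.setState(ousUID, "ReadyForProcessing")
    xtss.setExecutive(ousUID, "EU")               # stored in the 'flags' field
    retcode, ouses = xtss.findByStateSubstate("ReadyForProcessing", "^Pipeline")
    if retcode == 200:
        print("matching OUSs:", [o['entityId'] for o in ouses])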
class MqConnection():
    """
    Implements a RabbitMQ-like message queue. Implementation is based on CouchDB.

    Constructor args:
        host:      where the queue server is running
        queueName: name of queue to communicate on
    """

    try:
        # This may or may not work -- it's some third-party service I found somewhere
        # It will fail if queried too often, like more than once per second
        myIP = urllib.request.urlopen('http://api.infoip.io/ip').read().decode('utf8')
    except Exception:
        myIP = "0.0.0.0"

    def __init__(self, host, queueName, listenTo=None, sendTo=None):
        self.host = host
        self.queueName = queueName
        self.listenTo = None
        self.sendTo = None
        if listenTo is not None:
            self.listenTo = listenTo
        if sendTo is not None:
            self.sendTo = sendTo
        self.dbcon = DbConnection(baseUrl)
        # print( " [x] Created queue %s on %s" % ( self.queueName, self.host ))

    def send(self, messageBody, selector=None, addMsgbackID=False):
        '''
        Send a message to some other filter listening on the queue for the selector
        '''
        if selector is None:
            selector = self.sendTo
        if selector is None:
            raise RuntimeError("No selectors to send to")

        now = nowISO()

        # Are we broadcasting to a group?
        if not selector.endswith('.*'):
            # NO, just to a single receiver
            return self._send(now, selector, addMsgbackID, messageBody)
        else:
            # YES, let's retrieve the group and send to all its participants
            # TODO: cache group definition somewhere instead of querying
            #       the database every time
            retcode, group = self.dbcon.findOne(self.queueName, selector)
            if retcode == 404:
                raise RuntimeError("Group not found: %s" % selector)
            messages = []
            for member in group['members']:
                m = self._send(now, member, addMsgbackID, messageBody)
                messages.append(m)
            return messages

    def _send(self, now, selector, addMsgbackID, messageBody):
        message = {}
        msgbackID = str(uuid.uuid4()).replace("-", "")
        message['creationTimestamp'] = now
        message['originIP'] = MqConnection.myIP
        message['selector'] = selector
        message['consumed'] = False
        if addMsgbackID:
            message['msgbackID'] = msgbackID
        message['body'] = messageBody
        messageID = now + "-" + msgbackID
        retcode, msg = self.dbcon.save(self.queueName, messageID, message)
        if retcode != 201:
            raise RuntimeError("Msg send failed: DB error: %s: %s" % (retcode, msg))
        return message

    def getNext(self, selector, consume=True, fullMessage=False, condition=None):
        """
        Listen on the queue for new messages, return the oldest we find.

        Args:
            selector:    defines what messages to listen to
            consume:     if True, the message will be marked as consumed and no
                         other listener will receive it (default is True)
            fullMessage: if True, the message's metadata will be passed in as well
                         and the actual message will be in the 'body' field
                         (default False)
            condition:   boolean function to be invoked before starting to listen,
                         will cause the thread to sleep if the condition is false
        """
        messages = []
        callTime = time.time()
        # print( ">>> callTime: " + str(callTime) )
        selector = {
            "selector": {
                "$and": [
                    {"selector": {"$regex": selector}},
                    {"consumed": False}
                ]
            }
            # ,
            # "sort": [{"creationTimestamp": "desc"}]
            #
            # We should let the server sort the results but that
            # requires an index to be created and I don't care about
            # that right now -- amchavan, 13-Jul-2018
            #
            # TODO: revisit this if needed
        }

        # See if we can even start listening: if we have a conditional expression
        # and it evaluates to False we need to wait a bit
        while condition and not condition():
            time.sleep(dbdrwutils.incrementalSleep(callTime))

        while True:
            retcode, messages = self.dbcon.find(self.queueName, selector)
            # print( ">>> selector:", selector, "found:", len(messages))
            if retcode == 200:
                if len(messages) != 0:
                    break
                else:
                    time.sleep(dbdrwutils.incrementalSleep(callTime))
            else:
                raise RuntimeError("Msg read failed: DB error: %s: %s" % (retcode, messages))

        # print( ">>> found: ", messages )
        # print( ">>> found: ", len( messages ))
        messages.sort(key=lambda x: x['creationTimestamp'])   # Oldest first
        ret = messages[0]
        if consume:
            ret['consumed'] = True
            self.dbcon.save(self.queueName, ret["_id"], ret)
        # print( ">>> found: ", ret )
        if fullMessage:
            return ret
        return ret['body']

    def listen(self, callback, selector=None, consume=True, fullMessage=False, condition=None):
        """
        Listen on the queueName for messages matching the selector and process them.

        Args:
            callback:    function to process the message with
            selector:    defines what messages to listen to
            consume:     UNUSED: if True, the message will be marked as consumed and
                         no other listener will receive it (default is True)
            fullMessage: UNUSED: if True, the message's metadata will be passed in as
                         well and the actual message will be in the 'body' field
                         (default False)
            condition:   boolean function to be invoked before starting to listen,
                         will cause the thread to sleep if the condition is false
        """
        if selector is None:
            selector = self.listenTo
        if selector is None:
            raise RuntimeError("No selectors to listen to")

        while True:
            # print( ">>> waiting for message on queue '%s' matching selector '%s' ..." % (self.queueName, selector))
            message = self.getNext(selector, consume, fullMessage=fullMessage, condition=condition)
            # print( ">>> got", message )
            callback(message)

    def joinGroup(self, groupName, listener=None):
        """
        Join a group as a listener. Messages sent to the group will be passed on
        to this instance as well. Group names must end with '.*'

        Arg listener defaults to the value of the listenTo constructor arg.
        """
        if groupName is None:
            raise RuntimeError("No group name to join")
        if not groupName.endswith(".*"):
            raise RuntimeError("Group names must end with .*")
        if listener is None:
            listener = self.listenTo
        if listener is None:
            raise RuntimeError("No listener to join as")

        # We want to join the group groupName as listener
        # First let's see if that group exists, otherwise we'll create it
        retcode, group = self.dbcon.findOne(self.queueName, groupName)
        if retcode == 404:
            print(">>> not found: group=%s" % groupName)
            group = {}
            # group['groupName'] = groupName
            group['members'] = [listener]
            self.dbcon.save(self.queueName, groupName, group)
            print(">>> created: group=%s" % groupName)
            return

        # Found a group with that name: if needed, add ourselves to it
        print(">>> found: group=%s" % group)
        members = group['members']
        if listener not in members:
            group['members'].append(listener)
            self.dbcon.save(self.queueName, groupName, group)
            print(">>> added ourselves as members: group=%s" % group)
        else:
            print(">>> already in members list: listener=%s" % listener)
## ./launcher.py 2015.1.00657.S uid://X1/X1/Xb2

parser = argparse.ArgumentParser(description='Starter component, creates status entities')
parser.add_argument(dest="progID", help="ID of the project containing the OUS")
parser.add_argument(dest="ousUID", help="ID of the OUS that should be processed")
args = parser.parse_args()
ousUID = args.ousUID
progID = args.progID

dbName = "status-entities"
baseUrl = "http://localhost:5984"   # CouchDB
dbcon = DbConnection(baseUrl)

# If we have one already, delete it
retcode, ous = dbcon.findOne(dbName, ousUID)
if retcode == 200:
    dbcon.delete(dbName, ousUID, ous["_rev"])

# Prepare a new record and write it
ous = {}
ous['entityId'] = ousUID
ous['progID'] = progID
ous['state'] = "ReadyForProcessing"
ous['substate'] = None
ous['flags'] = {}
ous['timestamp'] = dbdrwutils.nowISO()
r, t = dbcon.save(dbName, ousUID, ous)
print(">>> retcode:", r)
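# The listings above lean on a small dbdrwutils helper module that is not shown
# here; a minimal sketch of the three functions they call (nowISO, encode,
# incrementalSleep), reconstructed from how they are used rather than copied from
# the original sources:
import base64
import datetime
import time

def nowISO():
    # UTC timestamp in ISO-8601 form, truncated to millisecond precision
    # (mirrors XTSS.__nowISO above)
    return datetime.datetime.utcnow().isoformat()[:-3]

def encode(uid):
    # Turn an entity UID like 'uid://X1/X1/Xb2' into a string that is safe to use
    # as a CouchDB document ID; URL-safe base64 is one plausible choice
    return base64.urlsafe_b64encode(uid.encode()).decode()

def incrementalSleep(callTime):
    # Return a sleep interval that grows with the time elapsed since callTime,
    # so that idle pollers back off gradually; capped at 60 seconds
    elapsed = time.time() - callTime
    return min(max(1.0, elapsed / 10.0), 60.0)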