def initializeHandler(cls, serviceInfoDict):
    """Initialization of Pilots Logging service.

    Loads the PilotsLoggingDB and attaches a message-queue consumer that feeds it.

    :param dict serviceInfoDict: service information dictionary (used for the CS path fallback)
    :return: S_OK() on success, S_ERROR otherwise
    """
    cls.consumersSet = set()
    try:
        loadResult = ObjectLoader().loadObject("WorkloadManagementSystem.DB.PilotsLoggingDB", "PilotsLoggingDB")
        if not loadResult["OK"]:
            return loadResult
        cls.pilotsLoggingDB = loadResult["Value"](parentLogger=cls.log)
    except RuntimeError as excp:
        return S_ERROR("Can't connect to DB: %s" % excp)

    queue = cls.srv_getCSOption("PilotsLoggingQueue")
    # HACK: for an unknown reason srv_getCSOption sometimes cannot reach the CS here,
    # so fall back to reading the option through its full CS path.
    if not queue:
        queue = gConfig.getValue(serviceInfoDict["serviceSectionPath"] + "/PilotsLoggingQueue")

    consumerResult = createConsumer(queue, callback=cls.consumingCallback)
    if not consumerResult["OK"]:
        return consumerResult
    cls.consumersSet.add(consumerResult["Value"])
    return S_OK()
def processRecords(self):
    """Consume all messages from the MQ (these are failover messages) and store
    them in the monitoring DB.

    Records that cannot be inserted (e.g. the DB is unavailable) are published
    back to the MQ so they are not lost.

    :return: S_OK() on success, S_ERROR otherwise
    """
    result = createConsumer(self.__monitoringType)
    if not result["OK"]:
        gLogger.error("Fail to create Consumer: %s" % result["Message"])
        return S_ERROR("Fail to create Consumer: %s" % result["Message"])
    mqConsumer = result["Value"]

    # Drain the consumer's internal queue completely, remembering the batches
    # that could not be inserted so they can be republished afterwards.
    failedToProcess = []
    result = S_OK()
    while result["OK"]:
        # we consume all messages from the consumer internal queue.
        result = mqConsumer.get()
        if result["OK"]:
            records = json.loads(result["Value"])
            retVal = monitoringDB.put(list(records), self.__monitoringType)
            if not retVal["OK"]:
                failedToProcess.append(records)

    # BUGFIX: close() used to be called inside the loop, which shut the consumer
    # down after the first message. Close only once every message has been read.
    mqConsumer.close()  # make sure that we will not process any more messages.

    # The DB was not available for these batches: publish them back to the MQ.
    for records in failedToProcess:
        res = self.publishRecords(records)
        if not res["OK"]:
            return res
    return S_OK()
def processRecords(self):
    """Consume all messages from the MQ (these are failover messages) and insert
    them into the monitoring DB.

    In case of insertion failure, the messages are published back to the MQ again.

    :return: S_OK() on success, S_ERROR otherwise
    """
    result = createConsumer(self.__monitoringType)
    if not result['OK']:
        gLogger.error("Fail to create Consumer: %s" % result['Message'])
        return S_ERROR("Fail to create Consumer: %s" % result['Message'])
    mqConsumer = result['Value']

    failedToProcess = []  # batches the DB rejected, to be republished below
    result = S_OK()
    while result['OK']:
        # we consume all messages from the consumer internal queue.
        result = mqConsumer.get()
        if result['OK']:
            records = json.loads(result['Value'])
            retVal = monitoringDB.put(list(records), self.__monitoringType)
            if not retVal['OK']:
                failedToProcess.append(records)

    # BUGFIX: the consumer was previously closed inside the loop, stopping the
    # drain after the first message; close it once, after everything is consumed.
    mqConsumer.close()  # make sure that we will not process any more messages.

    # the db is not available and we publish again the data to MQ
    for records in failedToProcess:
        res = self.publishRecords(records)
        if not res['OK']:
            return res
    return S_OK()
def checkConsumers(self):
    """Check whether consumers exist and work properly.

    (Re)create consumers if needed: either none exist yet, or the message
    counter has not moved since the last check (no progress).

    :return: S_OK/S_ERROR when a (re)creation was attempted, None otherwise
    """
    # recreate consumers if there are any problems
    if not self.consumers or self.messagesCount == self.messagesCountOld:
        for consumer in self.consumers:
            consumer.close()
        # BUGFIX: drop the closed consumers; without this the list keeps
        # growing and the success check below passes even when every
        # createConsumer call failed.
        self.consumers = []
        for uri in self.am_getOption("MessageQueueURI", "").replace(" ", "").split(","):
            result = createConsumer(uri, self.processMessage)
            if not result["OK"]:
                self.log.error("Failed to create a consumer from URI: %s" % uri)
                continue
            self.log.info("Successfully created a consumer from URI: %s" % uri)
            self.consumers.append(result["Value"])
        if self.consumers:
            return S_OK("Successfully created at least one consumer")
        return S_ERROR("Failed to create at least one consumer")
    # if everything is OK just update the counter
    self.messagesCountOld = self.messagesCount
def checkConsumers(self):
    '''Check whether consumers exist and work properly.

    (Re)create consumers if needed: either none exist yet, or no message has
    arrived since the previous check (the counter did not move).

    :return: S_OK/S_ERROR when a (re)creation was attempted, None otherwise
    '''
    # recreate consumers if there are any problems
    if not self.consumers or self.messagesCount == self.messagesCountOld:
        for consumer in self.consumers:
            consumer.close()
        # BUGFIX: reset the list; otherwise closed consumers accumulate and
        # mask creation failures in the check at the end.
        self.consumers = []
        for uri in self.am_getOption('MessageQueueURI', '').replace(" ", "").split(','):
            result = createConsumer(uri, self.processMessage)
            if not result['OK']:
                self.log.error('Failed to create a consumer from URI: %s' % uri)
                continue
            self.log.info('Successfully created a consumer from URI: %s' % uri)
            self.consumers.append(result['Value'])
        if self.consumers:
            return S_OK('Successfully created at least one consumer')
        return S_ERROR('Failed to create at least one consumer')
    # if everything is OK just update the counter
    self.messagesCountOld = self.messagesCount
def initializeHandler(cls, serviceInfoDict):
    """Initialization of Pilots Logging service.

    Instantiates the PilotsLoggingDB and registers an MQ consumer feeding it.

    :param dict serviceInfoDict: service information dictionary (used for the CS path fallback)
    :return: S_OK() on success, S_ERROR otherwise
    """
    cls.consumersSet = set()
    cls.pilotsLoggingDB = PilotsLoggingDB()

    queue = cls.srv_getCSOption("PilotsLoggingQueue")
    # HACK: for an unknown reason srv_getCSOption sometimes cannot reach the CS,
    # so fall back to reading the option through its full CS path.
    if not queue:
        queue = gConfig.getValue(serviceInfoDict['serviceSectionPath'] + "/PilotsLoggingQueue")

    consumerResult = createConsumer(queue, callback=cls.consumingCallback)
    if not consumerResult['OK']:
        return consumerResult
    cls.consumersSet.add(consumerResult['Value'])
    return S_OK()
def processRecords(self):
    """Consume all messages from the MQ (these are failover messages) and insert
    them into the monitoring DB.

    Batches that cannot be stored are published back to the MQ afterwards.

    :return: S_OK() on success, S_ERROR otherwise
    """
    # if the db is not accessible, the records will be not processed from MQ
    retVal = monitoringDB.pingDB()
    if not retVal['OK']:
        return retVal
    if not retVal['Value']:  # false if we can not connect to the db
        return retVal

    created = createConsumer("Monitoring::Queue::%s" % self.__monitoringType)
    if not created['OK']:
        gLogger.error("Fail to create Consumer: %s" % created['Message'])
        return S_ERROR("Fail to create Consumer: %s" % created['Message'])
    mqConsumer = created['Value']

    failedToProcess = []
    getResult = S_OK()
    while getResult['OK']:
        # we consume all messages from the consumer internal queue.
        getResult = mqConsumer.get()
        if getResult['OK']:
            records = json.loads(getResult['Value'])
            insertResult = monitoringDB.put(list(records), self.__monitoringType)
            if not insertResult['OK']:
                failedToProcess.append(records)

    mqConsumer.close()  # make sure that we will not process any more messages.

    # the db is not available and we publish again the data to MQ
    for records in failedToProcess:
        res = self.publishRecords(records)
        if not res['OK']:
            return res
    return S_OK()
def processRecords(self):
    """Consume all messages from the MQ (these are failover messages) and insert
    them into the monitoring DB.

    Batches that cannot be stored are published back to the MQ afterwards.

    :return: S_OK() on success, S_ERROR otherwise
    """
    # if the db is not accessible, the records will be not processed from MQ
    pingResult = monitoringDB.pingDB()
    if not pingResult['OK']:
        return pingResult
    if not pingResult['Value']:  # false if we can not connect to the db
        return pingResult

    created = createConsumer("Monitoring::Queue::%s" % self.__failoverQueueName)
    if not created['OK']:
        gLogger.error("Fail to create Consumer: %s" % created['Message'])
        return S_ERROR("Fail to create Consumer: %s" % created['Message'])
    mqConsumer = created['Value']

    failedToProcess = []
    getResult = S_OK()
    while getResult['OK']:
        # we consume all messages from the consumer internal queue.
        getResult = mqConsumer.get()
        if getResult['OK']:
            records = json.loads(getResult['Value'])
            insertResult = monitoringDB.put(list(records), self.__monitoringType)
            if not insertResult['OK']:
                failedToProcess.append(records)

    mqConsumer.close()  # make sure that we will not process any more messages.

    # the db is not available and we publish again the data to MQ
    for records in failedToProcess:
        res = self.publishRecords(records)
        if not res['OK']:
            return res
    return S_OK()