def testPollOneMessageWithUnackedMessagesReturnedToQueue(self):
  # Messages that were polled but never acked must be returned to the queue
  # once the MessageBusConnector that retrieved them is closed.
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      # Create the queue and publish ten small messages
      bus.createMessageQueue(mqName=mqName, durable=True)

      expectedContent = [str(i) for i in xrange(10)]
      for body in expectedContent:
        bus.publish(mqName, body, persistent=True)

    # Retrieve the published messages WITHOUT acking them
    polledBodies = []
    with MessageBusConnector() as bus:
      with bus.consume(mqName) as consumer:
        for _ in xrange(len(expectedContent)):
          polledBodies.append(consumer.pollOneMessage().body)

        # Queue should now appear drained
        self.assertIsNone(consumer.pollOneMessage())

    self.assertEqual(polledBodies, expectedContent)
    del polledBodies

    # Read them again, acking this time; the unacked messages should have
    # been returned to the message queue in the original order.
    # NOTE: RabbitMQ broker restores them back in original order, but this
    # is not mandated by AMQP 0.9.1
    rereadBodies = []
    with MessageBusConnector() as bus:
      with bus.consume(mqName) as consumer:
        for _ in xrange(len(expectedContent)):
          msg = consumer.pollOneMessage()
          rereadBodies.append(msg.body)
          msg.ack()

        self.assertIsNone(consumer.pollOneMessage())

    self.assertEqual(rereadBodies, expectedContent)

    # Everything was acked, so the message queue must be empty now
    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testStartMultipleModelRunnersAndStopThem(self):
  # Starts several ModelRunners and stops them gracefully to confirm that
  # they can all stop without conflicting with each other: if
  # ModelRunnerProxy doesn't configure subprocess.Popen with
  # `close_fds=True`, then graceful shutdown will fail because the stdin of
  # some child processes will be cloned into those that are started after
  # them, and closing stdin of an earlier ModelRunner child process won't
  # have the desired effect of terminating that process (since other clones
  # of that file descriptor will prevent it from fully closing)
  #
  # TODO send commands to models and verify output
  runnerProxies = []
  modelIDs = tuple("abcdef" + str(i) for i in xrange(5))

  with ModelSwapperInterface() as swapper:
    modelInputMQs = tuple(swapper._getModelInputQName(modelID=modelID)
                          for modelID in modelIDs)

  with amqp_test_utils.managedQueueDeleter(modelInputMQs):
    with MessageBusConnector() as bus:
      for mq in modelInputMQs:
        bus.createMessageQueue(mq, durable=True)

    for modelID in modelIDs:
      runnerProxies.append(
        slot_agent.ModelRunnerProxy(modelID=modelID,
                                    onTermination=lambda: None,
                                    logger=_LOGGER))

    returnCodes = [proxy.stopGracefully() for proxy in runnerProxies]

    self.assertEqual(returnCodes, [0] * len(runnerProxies))
def testPublish(self):
  # Publish messages and verify that they were published
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      # Create the queue
      bus.createMessageQueue(mqName=mqName, durable=True)

      # Now add some messages - a small and a large one
      smallBody = "a" * 100
      largeBody = "b" * 100000
      bus.publish(mqName, smallBody, persistent=True)
      bus.publish(mqName, largeBody, persistent=True)

    # Verify that the messages were added
    self.assertEqual(_getQueueMessageCount(mqName), 2)

    # Drain the queue directly via the AMQP client, checking content/order
    connParams = amqp.connection.getRabbitmqConnectionParameters()
    with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
        amqpClient):
      for expectedBody in (smallBody, largeBody):
        msg = amqpClient.getOneMessage(mqName, noAck=False)
        self.assertEqual(msg.body, expectedBody)
        msg.ack()

    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testPublishExgNotPublished(self):
  """
  MessageBusConnector.publishExg must return False when the message fails
  to publish immediately (mandatory=True and no queue bound to the
  exchange).
  """
  exgName = "testPublishExgNotPublished"
  routingKey = "testPublishExgNotPublished-routing-key"

  # Create an exchange, but don't bind a queue to it
  connParams = amqp.connection.getRabbitmqConnectionParameters()
  with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
      amqpClient):
    amqpClient.declareExchange(exgName, exchangeType="direct")

  # Attempt to publish a single message to that exchange via
  # MessageBusConnector with mandatory=True
  with MessageBusConnector() as bus:
    published = bus.publishExg(
      exchange=exgName,
      routingKey=routingKey,
      body="testPublishExgNotPublished-body",
      properties=None,
      mandatory=True)

  # With nothing bound, the broker cannot route the message
  self.assertFalse(published)
def testPublishManyMessages(self):
  # Publish a batch of tiny messages and verify all arrive intact, in order
  numMessagesToPublish = 50

  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      # Create the queue
      bus.createMessageQueue(mqName=mqName, durable=True)

      # Now add a bunch of messages
      expectedContent = [str(i) for i in xrange(numMessagesToPublish)]

      _LOGGER.info("testPublishManyMessages: publishing %s tiny messages",
                   numMessagesToPublish)
      for body in expectedContent:
        bus.publish(mqName, body, persistent=True)
      _LOGGER.info("testPublishManyMessages: done publishing %s tiny "
                   "messages", numMessagesToPublish)

    # Verify that the messages were added
    self.assertEqual(_getQueueMessageCount(mqName), numMessagesToPublish)

    # Drain the queue with the raw AMQP client and compare content
    connParams = amqp.connection.getRabbitmqConnectionParameters()
    with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
        amqpClient):
      actualContent = []
      for _ in xrange(numMessagesToPublish):
        msg = amqpClient.getOneMessage(mqName, noAck=False)
        actualContent.append(msg.body)
        msg.ack()

    self.assertSequenceEqual(actualContent, expectedContent)
def testPollOneMessage(self):
  # Verify that pollOneMessage retrieves published messages in order and
  # returns None once the queue is drained
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName, durable=True)

      with bus.consume(mqName) as consumer:
        # Now add some messages
        smallBody = "a" * 100
        largeBody = "b" * 100000
        bus.publish(mqName, smallBody, persistent=True)
        bus.publish(mqName, largeBody, persistent=True)

        for expectedBody in (smallBody, largeBody):
          msg = consumer.pollOneMessage()
          msg.ack()
          self.assertEqual(msg.body, expectedBody)

        # Nothing left to poll
        self.assertIsNone(consumer.pollOneMessage())

      # Verify that consumer's context manager cleaned up
      self.assertIsNone(consumer._channelMgr)

    # Verify that the message queue is empty now
    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testConsumerIterable(self):
  # Create a message queue, publish some messages to it, and then use the
  # message consumer iterable to retrieve those messages
  numMessagesToPublish = 10

  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

      expectedContent = []
      for i in xrange(numMessagesToPublish):
        expectedContent.append(str(i))
        bus.publish(mqName, expectedContent[-1], persistent=True)

    # Verify that correct number of messages were published
    self.assertEqual(_getQueueMessageCount(mqName), numMessagesToPublish)

    # Consume via the iterable in a helper thread so that a broken iterable
    # can't deadlock the test runner
    def runConsumerThread(mqName, numMessages, resultQ):
      try:
        with MessageBusConnector() as bus:
          with bus.consume(mqName=mqName) as consumer:
            consumerIter = iter(consumer)
            for _ in xrange(numMessages):
              msg = next(consumerIter)
              resultQ.put(msg.body)
              msg.ack()
      except:
        resultQ.put(dict(exception=sys.exc_info()[1]))
        raise

    resultQ = Queue.Queue()

    consumerThread = threading.Thread(
      target=runConsumerThread,
      args=(mqName, numMessagesToPublish, resultQ))
    consumerThread.setDaemon(True)
    consumerThread.start()

    consumerThread.join(timeout=30)
    self.assertFalse(consumerThread.isAlive())

    # Verify content
    actualContent = []
    while True:
      try:
        actualContent.append(resultQ.get_nowait())
      except Queue.Empty:
        break

    self.assertEqual(actualContent, expectedContent)

    # Everything was acked, so the message queue is now empty
    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testPurge(self):
  # Create a message queue, add some messages to it, then purge the data
  # and verify that it's empty
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

      bus.publish(mqName, "abc", persistent=True)
      bus.publish(mqName, "def", persistent=True)

      # Both messages should be queued
      self.assertEqual(_getQueueMessageCount(mqName), 2)
      self.assertFalse(bus.isEmpty(mqName))

      # Purge the queue
      bus.purge(mqName=mqName)

      # The queue still exists, but holds no messages
      self.assertEqual(_getQueueMessageCount(mqName), 0)
      self.assertTrue(bus.isEmpty(mqName))
def __init__(self):
  """ Initialize the ModelSwapperInterface. This uses a lazy loading of the
  input and output queues with no pre-meditation.
  """
  self._logger = _getLogger()

  config = ModelSwapperConfig()

  # Name of the results message queue, from config
  self._resultsQueueName = config.get(
    self._CONFIG_SECTION, self._RESULTS_Q_OPTION_NAME)

  # The name of a model's input message queue is the concatenation of this
  # prefix and the modelID
  self._modelInputQueueNamePrefix = config.get(
    self._CONFIG_SECTION, self._MODEL_INPUT_Q_PREFIX_OPTION_NAME)

  # Name of the scheduler-notification message queue, from config
  self._schedulerNotificationQueueName = config.get(
    self._CONFIG_SECTION, self._SCHEDULER_NOTIFICATION_Q_OPTION_NAME)

  # Message bus connector
  self._bus = MessageBusConnector()

  # Outstanding request and/or response consumer instances
  self._consumers = []
def testCreateDurableMessageQueueSecondTime(self):
  # Creating the same durable message queue twice must succeed (the create
  # call is idempotent)
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

    self.assertEqual(_getQueueMessageCount(mqName), 0)

    # And one more time, via a fresh connector...
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testIsEmptyWithQueueNotFound(self):
  # isEmpty on a non-existent message queue must raise MessageQueueNotFound
  mqName = self._getUniqueMessageQueueName()

  with MessageBusConnector() as bus:
    with self.assertRaises(MessageQueueNotFound):
      bus.isEmpty(mqName=mqName)
def testPublishWithQueueNotFound(self):
  # Verify that publish to a non-existent message queue raises the expected
  # exception.
  # NOTE: the original comment said "isEmpty" — a copy-paste error; this
  # test exercises publish().
  mqName = self._getUniqueMessageQueueName()

  with self.assertRaises(MessageQueueNotFound):
    with MessageBusConnector() as bus:
      bus.publish(mqName, "abc", persistent=True)
def testIsEmptyWithEmptyQueue(self):
  # A freshly-created queue must report as empty
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

      self.assertTrue(bus.isEmpty(mqName))
def testPollOneMessageWithQueueNotFound(self):
  # pollOneMessage on a non-existent queue must raise MessageQueueNotFound
  mqName = self._getUniqueMessageQueueName()

  with MessageBusConnector() as bus:
    with self.assertRaises(MessageQueueNotFound):
      with bus.consume(mqName) as consumer:
        consumer.pollOneMessage()
def runConsumerThread(mqName, resultQ):
  # Drain mqName via a non-blocking consumer, forwarding each message body
  # to resultQ; on failure, forward the exception to resultQ and re-raise.
  try:
    with MessageBusConnector() as bus:
      with bus.consume(mqName=mqName, blocking=False) as mqConsumer:
        for message in mqConsumer:
          resultQ.put(message.body)
          message.ack()
  except:
    resultQ.put(dict(exception=sys.exc_info()[1]))
    raise
def runConsumerThread(mqName, resultQ):
  # Attempt to consume from mqName; the queue is expected not to exist, so
  # the resulting exception is forwarded to resultQ for the test to inspect
  # (deliberately swallowed here, hence the pylint suppression).
  try:
    with MessageBusConnector() as bus:
      with bus.consume(mqName=mqName) as mqConsumer:
        # NOTE: we actually don't expect any messages in this test
        for message in mqConsumer:
          message.ack()
  except:  # pylint: disable=W0702
    # NOTE: this is what we expect in this test since the mq wasn't created
    resultQ.put(dict(exception=sys.exc_info()[1]))
def runConsumerThread(mqName, numMessages, resultQ):
  # Read numMessages message bodies from mqName WITHOUT acking them and
  # forward the bodies to resultQ; on failure, forward the exception to
  # resultQ and re-raise.
  try:
    with MessageBusConnector() as bus:
      with bus.consume(mqName=mqName) as mqConsumer:
        consumerIter = iter(mqConsumer)
        for _ in xrange(numMessages):
          # Read and deliberately don't ack
          resultQ.put(next(consumerIter).body)
  except:
    resultQ.put(dict(exception=sys.exc_info()[1]))
    raise
def onTimeout(resultsQueueName):
  # Timeout handler: log how many results were seen vs expected, then abort
  # the blocked results consumer by deleting its queue.
  _LOGGER.error(
    "Timed out waiting to get results from models; numResults=%d; "
    "expected=%d", len(seenMetricIDs), len(allMetricIDs))

  # HACK delete model swapper results queue to abort the consumer
  try:
    with MessageBusConnector() as bus:
      bus.deleteMessageQueue(resultsQueueName)
  except Exception:
    _LOGGER.exception("Failed to delete results mq=%s", resultsQueueName)
    raise
def testCreateDurableMessageQueue(self):
  # Create a durable message queue and verify that it exists
  # TODO Test that it's a Durable queue and auto-delete=false
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

    # Check that MessageBusConnector's context manager cleaned up
    self.assertIsNone(bus._channelMgr)

    # The new queue exists and is empty
    self.assertEqual(_getQueueMessageCount(mqName), 0)
def testPurgeWithEmptyQueue(self):
  # Purging an empty queue must not raise an exception
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

      # Purge the empty queue
      bus.purge(mqName=mqName)

      # The message queue still exists and indeed has no messages
      self.assertEqual(_getQueueMessageCount(mqName), 0)
def testGetAllMessageQueues(self):
  # getAllMessageQueues must report both durable and non-durable queues
  durableMQ = self._getUniqueMessageQueueName()
  nonDurableMQ = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter((durableMQ, nonDurableMQ)):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=durableMQ, durable=True)
      bus.createMessageQueue(mqName=nonDurableMQ, durable=False)

      allQueues = bus.getAllMessageQueues()

      self.assertIn(durableMQ, allQueues)
      self.assertIn(nonDurableMQ, allQueues)
def testDeleteMessageQueueThatDoesNotExist(self):
  # Deleting a non-existent message queue must not raise an exception
  mqName = self._getUniqueMessageQueueName()

  # NOTE: deleting an entity that doesn't exist used to result in
  # NOT_FOUND=404 channel error from RabbitMQ. However, more recent versions
  # of RabbitMQ changed that behavior such that it now completes with
  # success. Per https://www.rabbitmq.com/specification.html: "We have made
  # queue.delete into an idempotent assertion that the queue must not exist,
  # in the same way that queue.declare asserts that it must."
  with MessageBusConnector() as bus:
    bus.deleteMessageQueue(mqName=mqName)
def testIsEmptyWithNonEmptyQueue(self):
  # isEmpty must return False once messages are queued
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)

      bus.publish(mqName, "abc", persistent=True)
      bus.publish(mqName, "def", persistent=True)

      # Both messages should be queued
      self.assertEqual(_getQueueMessageCount(mqName), 2)

      self.assertFalse(bus.isEmpty(mqName))
def testDeleteMessageQueue(self):
  # Create a queue, delete it, and verify that a subsequent passive declare
  # fails with NOT_FOUND.
  # Fix: dropped the unused local `r` that bound the passive declareQueue
  # result — the call is made only for its side effect of raising.
  mqName = self._getUniqueMessageQueueName()

  with amqp_test_utils.managedQueueDeleter(mqName):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(mqName=mqName, durable=True)
      self.assertEqual(_getQueueMessageCount(mqName), 0)

      bus.deleteMessageQueue(mqName=mqName)

    connParams = amqp.connection.getRabbitmqConnectionParameters()
    with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
        amqpClient):
      with self.assertRaises(amqp.exceptions.AmqpChannelError) as excContext:
        # passive=True: raise instead of creating the queue if it's absent
        amqpClient.declareQueue(mqName, passive=True)

      self.assertEqual(excContext.exception.code,
                       amqp.constants.AMQPErrorCodes.NOT_FOUND)
def testStartModelRunnerAndStopIt(self):
  # Simple test that starts a ModelRunner and stops it gracefully
  # TODO send command to model and verify output
  modelID = "abcdef"

  with ModelSwapperInterface() as swapper:
    modelInputMQ = swapper._getModelInputQName(modelID=modelID)

  with amqp_test_utils.managedQueueDeleter(modelInputMQ):
    with MessageBusConnector() as bus:
      bus.createMessageQueue(modelInputMQ, durable=True)

    runnerProxy = slot_agent.ModelRunnerProxy(modelID=modelID,
                                              onTermination=lambda: None,
                                              logger=_LOGGER)

    returnCode = runnerProxy.stopGracefully()

    self.assertEqual(returnCode, 0)
def _cleanRabbitmq():
  """Delete Taurus Engine-related message queues and exchanges"""
  g_log.info("Deleting Taurus Engine-related message queues and exchanges")

  appConfig = taurus.engine.config
  modelSwapperConfig = model_swapper.ModelSwapperConfig()

  # Queues belonging to Taurus, matched by exact name
  taurusQueues = [
    modelSwapperConfig.get("interface_bus", "results_queue"),
    modelSwapperConfig.get("interface_bus", "scheduler_notification_queue"),
    appConfig.get("metric_listener", "queue_name"),
    DynamoDBService._INPUT_QUEUE_NAME  # pylint: disable=W0212
  ]

  # Per-model input queues are matched by this name prefix
  modelInputPrefix = modelSwapperConfig.get("interface_bus",
                                            "model_input_queue_prefix")

  with MessageBusConnector() as messageBus:
    for queue in messageBus.getAllMessageQueues():
      if queue.startswith(modelInputPrefix) or queue in taurusQueues:
        messageBus.deleteMessageQueue(queue)

  # Delete exchanges belonging to Taurus
  taurusExchanges = [
    appConfig.get("metric_streamer", "results_exchange_name"),
    appConfig.get("non_metric_data", "exchange_name")
  ]

  amqpClient = amqp.synchronous_amqp_client.SynchronousAmqpClient(
    connectionParams=amqp.connection.getRabbitmqConnectionParameters())
  with amqpClient:
    for exg in taurusExchanges:
      g_log.info("Deleting Taurus exchange=%s", exg)
      amqpClient.deleteExchange(exchange=exg)
def testMessageBusIsAccessible(self):  # pylint: disable=R0201
  # Smoke test: the message bus must accept a connection and answer a
  # trivial queue-presence query
  with MessageBusConnector() as bus:
    bus.isMessageQeueuePresent("")
messageBus.publishExg(exchange=config.get("metric_streamer", "results_exchange_name"), routingKey="", body=payload, properties=modelInferenceResultProperties) g_log.info("Done! numMetricDataRows=%d; numModels=%d", numMetricDataRows, numModels) if __name__ == "__main__": logging_support.LoggingSupport.initTool() parser = argparse.ArgumentParser( description="Replay metric data to model results exchange") parser.add_argument( "--chunksize", type=int, default=DEFAULT_CHUNKSIZE, metavar="NUM", help=("Maximum number of records to include in a batch of" "model inference results message to model results " "exchange")) _args = parser.parse_args() with MessageBusConnector() as messageBus: replayMetricDataToModelResultsExchange(messageBus=messageBus, chunksize=_args.chunksize)
def testModelSwapper(self):
  """Simple end-to-end test of the model swapper system."""

  modelSchedulerSubprocess = self._startModelSchedulerSubprocess()
  self.addCleanup(lambda: modelSchedulerSubprocess.kill()
                  if modelSchedulerSubprocess.returncode is None
                  else None)

  modelID = "foobar"
  resultBatches = []

  with ModelSwapperInterface() as swapperAPI:
    possibleModels = getScalarMetricWithTimeOfDayParams(metricData=[0],
                                                        minVal=0,
                                                        maxVal=1000)

    # Submit requests including a model creation command and two data rows.
    args = possibleModels[0]
    args["inputRecordSchema"] = (
      FieldMetaInfo("c0", FieldMetaType.datetime,
                    FieldMetaSpecial.timestamp),
      FieldMetaInfo("c1", FieldMetaType.float,
                    FieldMetaSpecial.none),
    )

    # Define the model
    _LOGGER.info("Defining the model")
    swapperAPI.defineModel(modelID=modelID, args=args,
                           commandID="defineModelCmd1")

    # Attempt to define the same model again
    _LOGGER.info("Defining the model again")
    swapperAPI.defineModel(modelID=modelID, args=args,
                           commandID="defineModelCmd2")

    # Send input rows to the model
    inputRows = [
      ModelInputRow(rowID="rowfoo",
                    data=[datetime.datetime(2013, 5, 23, 8, 13, 00), 5.3]),
      ModelInputRow(rowID="rowbar",
                    data=[datetime.datetime(2013, 5, 23, 8, 13, 15), 2.4]),
    ]
    _LOGGER.info("Submitting batch of %d input rows...", len(inputRows))
    swapperAPI.submitRequests(modelID=modelID, requests=inputRows)

    _LOGGER.info("These models have pending input: %s",
                 swapperAPI.getModelsWithInputPending())

    # Retrieve all results.
    # NOTE: We collect results via background thread to avoid deadlocking
    # the test runner in the event consuming blocks unexpectedly
    _LOGGER.info("Reading all batches of results...")

    numBatchesExpected = 3
    resultBatches.extend(self._consumeResults(numBatchesExpected,
                                              timeout=20))
    self.assertEqual(len(resultBatches), numBatchesExpected)

    with MessageBusConnector() as bus:
      # The results message queue should be empty now
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

    # Delete the model
    _LOGGER.info("Deleting the model")
    swapperAPI.deleteModel(modelID=modelID, commandID="deleteModelCmd1")

    _LOGGER.info("Waiting for model deletion result")
    resultBatches.extend(self._consumeResults(1, timeout=20))

    self.assertEqual(len(resultBatches), 4)

    with MessageBusConnector() as bus:
      # The results message queue should be empty now
      self.assertTrue(bus.isEmpty(swapperAPI._resultsQueueName))

      # The model input queue should be deleted now
      self.assertFalse(
        bus.isMessageQeueuePresent(
          swapperAPI._getModelInputQName(modelID=modelID)))

    # Try deleting the model again, to make sure there are no exceptions
    _LOGGER.info("Attempting to delete the model again")
    swapperAPI.deleteModel(modelID=modelID, commandID="deleteModelCmd1")

  # Verify results

  # First result batch should be the first defineModel result
  batch = resultBatches[0]
  self.assertEqual(batch.modelID, modelID)
  self.assertEqual(len(batch.objects), 1)

  result = batch.objects[0]
  self.assertIsInstance(result, ModelCommandResult)
  self.assertEqual(result.method, "defineModel")
  self.assertEqual(result.status, htmengineerrno.SUCCESS)
  self.assertEqual(result.commandID, "defineModelCmd1")

  # The second result batch should be for the second defineModel result for
  # the same model
  batch = resultBatches[1]
  self.assertEqual(batch.modelID, modelID)
  self.assertEqual(len(batch.objects), 1)

  result = batch.objects[0]
  self.assertIsInstance(result, ModelCommandResult)
  self.assertEqual(result.method, "defineModel")
  self.assertEqual(result.status, htmengineerrno.SUCCESS)
  self.assertEqual(result.commandID, "defineModelCmd2")

  # The third batch should be for the two input rows
  batch = resultBatches[2]
  self.assertEqual(batch.modelID, modelID)
  self.assertEqual(len(batch.objects), len(inputRows))
  for inputRow, result in zip(inputRows, batch.objects):
    self.assertIsInstance(result, ModelInferenceResult)
    self.assertEqual(result.status, htmengineerrno.SUCCESS)
    self.assertEqual(result.rowID, inputRow.rowID)
    self.assertIsInstance(result.anomalyScore, float)

  # The fourth batch should be for the "deleteModel"
  batch = resultBatches[3]
  self.assertEqual(batch.modelID, modelID)
  self.assertEqual(len(batch.objects), 1)

  result = batch.objects[0]
  self.assertIsInstance(result, ModelCommandResult)
  self.assertEqual(result.method, "deleteModel")
  self.assertEqual(result.status, htmengineerrno.SUCCESS)
  self.assertEqual(result.commandID, "deleteModelCmd1")

  # Signal Model Scheduler Service subprocess to shut down and wait for it
  waitResult = dict()

  def runWaiterThread():
    # Wait for subprocess exit, recording its return code (or the failure)
    try:
      waitResult["returnCode"] = modelSchedulerSubprocess.wait()
    except:
      _LOGGER.exception("Waiting for modelSchedulerSubprocess failed")
      waitResult["exceptionInfo"] = traceback.format_exc()
      raise
    return

  modelSchedulerSubprocess.terminate()

  waiterThread = threading.Thread(target=runWaiterThread)
  waiterThread.setDaemon(True)
  waiterThread.start()
  waiterThread.join(timeout=30)
  self.assertFalse(waiterThread.isAlive())

  self.assertEqual(waitResult["returnCode"], 0, msg=repr(waitResult))
def run(self):
  """ Consumes pending results.  Once result batch arrives, it will be
  dispatched to the correct model command result handler.

  :see: `_processModelCommandResult` and `_processModelInferenceResults`
  """
  # Properties for publishing model command results on RabbitMQ exchange
  modelCommandResultProperties = MessageProperties(
    deliveryMode=amqp.constants.AMQPDeliveryModes.PERSISTENT_MESSAGE,
    headers=dict(dataType="model-cmd-result"))

  # Properties for publishing model inference results on RabbitMQ exchange
  modelInferenceResultProperties = MessageProperties(
    deliveryMode=amqp.constants.AMQPDeliveryModes.PERSISTENT_MESSAGE)

  # Declare an exchange for forwarding our results
  with amqp.synchronous_amqp_client.SynchronousAmqpClient(
      amqp.connection.getRabbitmqConnectionParameters()) as amqpClient:
    amqpClient.declareExchange(self._modelResultsExchange,
                               exchangeType="fanout",
                               durable=True)

  with ModelSwapperInterface() as modelSwapper, \
      MessageBusConnector() as bus:
    with modelSwapper.consumeResults() as consumer:
      for batch in consumer:
        if self._profiling:
          batchStartTime = time.time()

        inferenceResults = []
        for result in batch.objects:
          try:
            if isinstance(result, ModelCommandResult):
              self._processModelCommandResult(batch.modelID, result)
              # Construct model command result message for consumption by
              # downstream processes
              try:
                cmdResultMessage = self._composeModelCommandResultMessage(
                  modelID=batch.modelID,
                  cmdResult=result)
              except (ObjectNotFoundError, MetricNotMonitoredError):
                # Nothing to forward for this command result
                pass
              else:
                bus.publishExg(
                  exchange=self._modelResultsExchange,
                  routingKey="",
                  body=self._serializeModelResult(cmdResultMessage),
                  properties=modelCommandResultProperties)
            elif isinstance(result, ModelInferenceResult):
              # Defer inference results; they are processed as one batch
              inferenceResults.append(result)
            else:
              self._log.error("Unsupported ModelResult=%r", result)
          except ObjectNotFoundError:
            self._log.exception("Error processing result=%r "
                                "from model=%s", result, batch.modelID)

        if inferenceResults:
          result = self._processModelInferenceResults(
            inferenceResults,
            metricID=batch.modelID)

          if result is not None:
            # Construct model results payload for consumption by
            # downstream processes
            metricRow, dataRows = result
            resultsMessage = self._composeModelInferenceResultsMessage(
              metricRow,
              dataRows)

            payload = self._serializeModelResult(resultsMessage)

            bus.publishExg(
              exchange=self._modelResultsExchange,
              routingKey="",
              body=payload,
              properties=modelInferenceResultProperties)

        batch.ack()

        if self._profiling:
          if inferenceResults:
            if result is not None:  # pylint: disable=W0633
              metricRow, rows = result
              rowIdRange = ("%s..%s" % (rows[0].rowid, rows[-1].rowid)
                            if len(rows) > 1
                            else str(rows[0].rowid))
              self._log.info(
                "{TAG:ANOM.BATCH.INF.DONE} model=%s; "
                "numItems=%d; rows=[%s]; tailRowTS=%s; duration=%.4fs; "
                "ds=%s; name=%s",
                batch.modelID, len(batch.objects), rowIdRange,
                rows[-1].timestamp.isoformat() + "Z",
                time.time() - batchStartTime, metricRow.datasource,
                metricRow.name)
          else:
            self._log.info(
              "{TAG:ANOM.BATCH.CMD.DONE} model=%s; "
              "numItems=%d; duration=%.4fs", batch.modelID,
              len(batch.objects), time.time() - batchStartTime)

  self._log.info("Stopped processing model results")