def manage_lambdas(self, queues: Mapping[str, Queue], enable: bool):
    """
    Enable or disable the readers and writers of the given queues
    """
    with ThreadPoolExecutor(max_workers=len(queues)) as tpe:
        futures = []

        def submit(f, *args, **kwargs):
            futures.append(tpe.submit(f, *args, **kwargs))

        for queue_name, queue in queues.items():
            if queue_name == config.notifications_queue_name():
                submit(self._manage_lambda, config.indexer_name, enable)
                submit(self._manage_sqs_push, config.indexer_name + '-contribute', queue, enable)
            elif queue_name == config.tallies_queue_name():
                submit(self._manage_sqs_push, config.indexer_name + '-aggregate', queue, enable)
            elif queue_name == config.tallies_queue_name(retry=True):
                # FIXME: Brittle coupling between the string literal below and
                #        the handler function name in app.py
                #        https://github.com/DataBiosphere/azul/issues/1848
                submit(self._manage_sqs_push, config.indexer_name + '-aggregate_retry', queue, enable)
        self._handle_futures(futures)
        futures = [
            tpe.submit(self._wait_for_queue_idle, queue)
            for queue in queues.values()
        ]
        self._handle_futures(futures)
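# _handle_futures and _wait_for_queue_idle are not shown in this excerpt. A
# minimal sketch of what a helper like _handle_futures might do, assuming its
# only job is to wait for the submitted futures and surface any exceptions
# (names and behavior are assumptions, not the actual implementation):

def _handle_futures(self, futures):
    errors = []
    for future in futures:
        exception = future.exception()  # blocks until the future completes
        if exception is not None:
            errors.append(exception)
    if errors:
        raise RuntimeError(errors)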
def progress(self) -> JSON:
    """
    The number of Data Store bundles pending to be indexed and the number of
    index documents in need of updating.
    """
    return {
        'up': True,
        'unindexed_bundles': sum(self.queues[config.notifications_queue_name()]
                                 .get('messages', {}).values()),
        'unindexed_documents': sum(chain.from_iterable(
            self.queues[config.tallies_queue_name(retry=retry)].get('messages', {}).values()
            for retry in (False, True)
        ))
    }
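# For illustration only, a return value of progress() might look like the
# following; the message counts are made up, not taken from a real deployment:
#
# {
#     'up': True,
#     'unindexed_bundles': 2,
#     'unindexed_documents': 146
# }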
        config.contribution_lambda_timeout + 10,
    "message_retention_seconds": 24 * 60 * 60,
    "redrive_policy": json.dumps({
        "maxReceiveCount": 10,
        "deadLetterTargetArn": "${aws_sqs_queue.%s.arn}"
                               % config.unqual_notifications_queue_name(fail=True)
    })
},
**{
    config.unqual_tallies_queue_name(retry=retry): {
        "name": config.tallies_queue_name(retry=retry),
        "fifo_queue": True,
        "delay_seconds": config.es_refresh_interval + 9,
        "visibility_timeout_seconds": config.aggregation_lambda_timeout(retry=retry) + 10,
        "message_retention_seconds": 24 * 60 * 60,
        "redrive_policy": json.dumps({
            "maxReceiveCount": 9 if retry else 1,
            "deadLetterTargetArn": "${aws_sqs_queue.%s.arn}"
                                   % config.unqual_tallies_queue_name(retry=not retry, fail=retry)
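# The redrive policies above chain the queues into a dead-letter cascade: a
# tally that fails once in the primary tallies queue is redriven to the retry
# queue, and only after repeated failures there does it land in the fail
# queue. A minimal sketch of how that chain could be inspected with boto3;
# the helper below and the queue names in the trailing comment are
# assumptions for illustration, not part of the deployment code:

import json

import boto3

sqs = boto3.client('sqs')


def redrive_target(queue_name: str) -> str:
    """Return the ARN of the given queue's dead-letter target."""
    queue_url = sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
    attributes = sqs.get_queue_attributes(QueueUrl=queue_url,
                                          AttributeNames=['RedrivePolicy'])
    redrive_policy = json.loads(attributes['Attributes']['RedrivePolicy'])
    return redrive_policy['deadLetterTargetArn']

# Expected chain, with hypothetical queue names:
#   tallies.fifo  ->  tallies_retry.fifo  ->  tallies_retry_fail.fifo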
def new_handler(self, event, context):
    app.lambda_context = context
    return old_handler(self, event, context)


old_handler = chalice.app.EventSourceHandler.__call__
chalice.app.EventSourceHandler.__call__ = new_handler


@app.on_sqs_message(queue=config.notifications_queue_name(), batch_size=1)
def contribute(event: chalice.app.SQSEvent):
    app.index_controller.contribute(event)


@app.on_sqs_message(queue=config.tallies_queue_name(),
                    batch_size=IndexController.document_batch_size)
def aggregate(event: chalice.app.SQSEvent):
    app.index_controller.aggregate(event)


# Any messages in the tallies queue that fail to be processed will be retried
# with more RAM in the tallies_retry queue.
@app.on_sqs_message(queue=config.tallies_queue_name(retry=True),
                    batch_size=IndexController.document_batch_size)
def aggregate_retry(event: chalice.app.SQSEvent):
    app.index_controller.aggregate(event, retry=True)
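# The monkey-patched handler above exposes the Lambda context object to the
# application via app.lambda_context. A hypothetical use inside the controller
# might be to stop working through a batch when the invocation is close to
# timing out; get_remaining_time_in_millis() is part of the standard Lambda
# context object, but the helper itself is an assumption, not code from this
# repository:

def running_out_of_time(safety_margin_ms: int = 30_000) -> bool:
    remaining_ms = app.lambda_context.get_remaining_time_in_millis()
    return remaining_ms < safety_margin_ms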
def _tallies_queue(self, retry=False):
    return self._queue(config.tallies_queue_name(retry=retry))
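# Hypothetical usage, assuming _queue() returns a boto3 SQS Queue resource
# (purge() is a standard method on that resource):
for retry in (False, True):
    self._tallies_queue(retry=retry).purge()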