def check_status(self):
    """Check SQS health by looking up a known queue's URL."""
    try:
        queue_name = settings.HOUNDIGRADE_RESULTS_QUEUE_NAME
        aws.get_sqs_queue_url(queue_name)
    except ClientError as e:
        logger.exception(e)
        self.add_error(_("SQS check failed due to boto3 error."))
    except Exception as e:
        logger.exception(e)
        self.add_error(_("SQS check failed due to unknown error."))

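# aws.get_sqs_queue_url is used throughout this module but defined elsewhere.
# A minimal sketch of the expected behavior, assuming it wraps boto3's
# get_queue_url (the create-on-missing fallback is an assumption, not
# something this section confirms):
import boto3
from botocore.exceptions import ClientError


def get_sqs_queue_url(queue_name):
    """Get the queue URL for queue_name, creating the queue if absent."""
    sqs = boto3.client("sqs")
    try:
        return sqs.get_queue_url(QueueName=queue_name)["QueueUrl"]
    except ClientError as e:
        if e.response["Error"]["Code"] == "AWS.SimpleQueueService.NonExistentQueue":
            # Assumed fallback: create the queue if it does not exist yet.
            return sqs.create_queue(QueueName=queue_name)["QueueUrl"]
        raise
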
def read_messages_from_queue(queue_name, max_count=1):
    """
    Read messages (up to max_count) from an SQS queue.

    Args:
        queue_name (str): The queue to read messages from
        max_count (int): Max number of messages to read

    Returns:
        list[object]: The de-queued messages.

    """
    queue_url = aws.get_sqs_queue_url(queue_name)
    sqs = boto3.client("sqs")
    sqs_messages = []
    max_batch_size = min(SQS_RECEIVE_BATCH_SIZE, max_count)
    for __ in range(max_count):
        # Because receive_message does *not* reliably return
        # MaxNumberOfMessages messages per call (see the AWS docs), we
        # iterate up to max_count times and rely on the break conditions
        # below to stop when the queue is exhausted or we have enough.
        new_messages = sqs.receive_message(
            QueueUrl=queue_url, MaxNumberOfMessages=max_batch_size
        ).get("Messages", [])
        if len(new_messages) == 0:
            break
        sqs_messages.extend(new_messages)
        if len(sqs_messages) >= max_count:
            break
    messages = []
    for sqs_message in sqs_messages:
        try:
            unwrapped = _sqs_unwrap_message(sqs_message)
            sqs.delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=sqs_message["ReceiptHandle"],
            )
            messages.append(unwrapped)
        except ClientError as e:
            # It's unclear exactly which exceptions could land here, but we
            # should log them, stop attempting further deletes, and return
            # what we have received (and thus deleted!) so far.
            logger.error(
                _(
                    "Unexpected error when attempting to read from %(queue)s: "
                    "%(error)s"
                ),
                {
                    "queue": queue_url,
                    "error": getattr(e, "response", {}).get("Error"),
                },
            )
            logger.exception(e)
            break
    return messages

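# _sqs_unwrap_message is referenced above but not defined in this section. A
# minimal sketch, assuming each message body is a plain JSON string (the
# serialization format is an assumption, mirroring the _sqs_wrap_message
# sketch further below):
import json


def _sqs_unwrap_message(sqs_message):
    # Deserialize the original payload from a raw SQS message dict. The
    # "Body" key is part of the boto3 receive_message response shape.
    return json.loads(sqs_message["Body"])
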
def persist_inspection_cluster_results_task():
    """
    Task to run periodically and read houndigrade messages.

    Runs as an asynchronous Celery task.

    Returns:
        tuple[list, list]: The successfully processed messages and the
            messages that failed processing.

    """
    queue_url = aws.get_sqs_queue_url(settings.HOUNDIGRADE_RESULTS_QUEUE_NAME)
    successes, failures = [], []
    for message in aws.yield_messages_from_queue(
        queue_url, settings.AWS_SQS_MAX_HOUNDI_YIELD_COUNT
    ):
        logger.info(
            _('Processing inspection results with id "%s"'), message.message_id
        )
        inspection_results = json.loads(message.body)
        if inspection_results.get(CLOUD_KEY) == CLOUD_TYPE_AWS:
            try:
                persist_aws_inspection_cluster_results(inspection_results)
            except Exception as e:
                logger.exception(_("Unexpected error in result processing: %s"), e)
                logger.debug(_("Failed message body is: %s"), message.body)
                failures.append(message)
                continue
            logger.info(
                _("Successfully processed message id %s; deleting from queue."),
                message.message_id,
            )
            aws.delete_messages_from_queue(queue_url, [message])
            successes.append(message)
        else:
            logger.error(
                _('Unsupported cloud type: "%s"'), inspection_results.get(CLOUD_KEY)
            )
            failures.append(message)
    if successes or failures:
        scale_down_cluster.delay()
    else:
        logger.info(_("No inspection results found."))
    return successes, failures

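# aws.yield_messages_from_queue is not shown in this section. Given that the
# task above reads message.message_id and message.body, it presumably yields
# boto3 SQS *resource* Message objects; a minimal sketch under that
# assumption:
import boto3


def yield_messages_from_queue(queue_url, max_number_of_messages=1):
    """Yield up to max_number_of_messages Message objects from the queue."""
    queue = boto3.resource("sqs").Queue(queue_url)
    yielded = 0
    while yielded < max_number_of_messages:
        # receive_messages returns at most 10 messages per call and may
        # return fewer than requested even when the queue is not empty.
        messages = queue.receive_messages(
            MaxNumberOfMessages=min(10, max_number_of_messages - yielded)
        )
        if not messages:
            break  # The queue appears to be empty; stop early.
        for message in messages:
            yield message
            yielded += 1
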
def add_messages_to_queue(queue_name, messages):
    """
    Send messages to an SQS queue.

    Args:
        queue_name (str): The queue to add messages to
        messages (list[dict]): A list of message dictionaries. The message
            dicts will be serialized as JSON strings.

    """
    queue_url = aws.get_sqs_queue_url(queue_name)
    sqs = boto3.client("sqs")
    wrapped_messages = [_sqs_wrap_message(message) for message in messages]
    batch_count = math.ceil(len(messages) / SQS_SEND_BATCH_SIZE)
    for batch_num in range(batch_count):
        start_pos = batch_num * SQS_SEND_BATCH_SIZE
        # Python slices exclude the end index, so do not subtract 1 here;
        # doing so would silently drop the last message of every batch.
        end_pos = start_pos + SQS_SEND_BATCH_SIZE
        batch = wrapped_messages[start_pos:end_pos]
        sqs.send_message_batch(QueueUrl=queue_url, Entries=batch)

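# _sqs_wrap_message is referenced above but not defined in this section. A
# minimal sketch: send_message_batch requires each entry to carry a
# batch-unique Id plus a MessageBody string; the UUID Id and JSON body here
# are assumptions for illustration.
import json
import uuid


def _sqs_wrap_message(message):
    return {
        "Id": str(uuid.uuid4()),
        "MessageBody": json.dumps(message),
    }
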
def handle(self, *args, **options):
    """Handle the command execution."""
    for queue_name in self.queue_names:
        self.stdout.write('Configuring SQS queue "{}"'.format(queue_name))
        queue_url = aws.get_sqs_queue_url(queue_name)
        aws.ensure_queue_has_dlq(queue_name, queue_url)
    for queue_url in self.queue_urls:
        queue_name = queue_url.split("/")[-1]
        self.stdout.write('Configuring SQS queue "{}"'.format(queue_name))
        try:
            aws.ensure_queue_has_dlq(queue_name, queue_url)
        except Exception:
            logger.error(
                _(
                    "Failed to configure queue %(name)s at %(url)s. "
                    "Please verify that a queue exists at this location. "
                    "This queue is not created automatically!"
                ),
                {"name": queue_name, "url": queue_url},
            )
            raise

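# aws.ensure_queue_has_dlq is not shown in this section. A minimal sketch of
# the likely shape: check the queue's RedrivePolicy attribute and attach a
# dead-letter queue if none is configured. The "-dlq" name suffix and the
# maxReceiveCount value are assumptions, not taken from this code.
import json

import boto3


def ensure_queue_has_dlq(queue_name, queue_url):
    sqs = boto3.client("sqs")
    attributes = sqs.get_queue_attributes(
        QueueUrl=queue_url, AttributeNames=["RedrivePolicy"]
    ).get("Attributes", {})
    if attributes.get("RedrivePolicy"):
        return  # A DLQ is already configured; nothing to do.
    # Create (or fetch) the DLQ, then point the source queue's RedrivePolicy
    # at its ARN.
    dlq_url = sqs.create_queue(QueueName="{}-dlq".format(queue_name))["QueueUrl"]
    dlq_arn = sqs.get_queue_attributes(
        QueueUrl=dlq_url, AttributeNames=["QueueArn"]
    )["Attributes"]["QueueArn"]
    sqs.set_queue_attributes(
        QueueUrl=queue_url,
        Attributes={
            "RedrivePolicy": json.dumps(
                {"deadLetterTargetArn": dlq_arn, "maxReceiveCount": 5}
            )
        },
    )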