Example #1
def persist_inspection_cluster_results_task():
    """
    Task to run periodically and read houndigrade messages.

    Returns:
        None: Run as an asynchronous Celery task.

    """
    messages = read_messages_from_queue(
        settings.HOUNDIGRADE_RESULTS_QUEUE_NAME, HOUNDIGRADE_MESSAGE_READ_LEN)
    logger.info(
        _('{0} read {1} message(s) for processing').format(
            'persist_inspection_cluster_results_task', len(messages)))
    if messages:
        for message in messages:
            inspection_result = message
            if isinstance(message, str):
                inspection_result = json.loads(message)
            if inspection_result.get(CLOUD_KEY) == CLOUD_TYPE_AWS:
                persist_aws_inspection_cluster_results(inspection_result)
            else:
                logger.error(
                    _('Unsupported cloud type: "{0}"').format(
                        inspection_result.get(CLOUD_KEY)))
        scale_down_cluster.delay()
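
The docstring above says the task runs periodically. As a hedged illustration only, one way to register such a periodic run with Celery beat is sketched below; the app name, dotted task path, and five-minute interval are all assumptions, not taken from these snippets.

# Sketch of a Celery beat entry; every name and value here is an assumption.
from celery import Celery

app = Celery('inspection_sketch')

app.conf.beat_schedule = {
    'persist-inspection-cluster-results': {
        # Hypothetical dotted path to the task defined above.
        'task': 'tasks.persist_inspection_cluster_results_task',
        'schedule': 300.0,  # seconds between runs; interval is illustrative
    },
}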
Example #2
    def test_read_messages_from_queue_stops_at_limit(self, mock_kombu):
        """Test that reading stops at the requested message limit."""
        mock_messages = self.prepare_mock_kombu_for_consuming(mock_kombu)
        queue_name = 'Test Queue'
        expected_results = [mock_messages[0].payload]
        actual_results = util.read_messages_from_queue(queue_name, 1)
        self.assertEqual(actual_results, expected_results)
        mock_messages[0].ack.assert_called_once_with()
        mock_messages[1].ack.assert_not_called()
Example #3
    def test_read_messages_from_queue_once(self, mock_kombu):
        """Test that one message is read from a message queue."""
        mock_messages = self.prepare_mock_kombu_for_consuming(mock_kombu)
        queue_name = 'Test Queue'
        expected_results = [mock_messages[0].payload]
        actual_results = util.read_messages_from_queue(queue_name)
        self.assertEqual(actual_results, expected_results)
        mock_messages[0].ack.assert_called_once_with()
        mock_messages[1].ack.assert_not_called()
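
The two tests above mock kombu and rely on a prepare_mock_kombu_for_consuming helper that is not shown. For orientation, here is a minimal sketch of a read_messages_from_queue implementation that would behave this way. It assumes kombu's SimpleQueue API, a default count of 1, and a CELERY_BROKER_URL setting; none of these details are confirmed by the snippets.

# Hypothetical sketch only: the broker setting name and the default count are
# assumptions inferred from the tests above, not taken from the project.
from queue import Empty

import kombu
from django.conf import settings


def read_messages_from_queue(queue_name, count=1):
    """Read and acknowledge up to ``count`` payloads from the named queue."""
    messages = []
    with kombu.Connection(settings.CELERY_BROKER_URL) as connection:
        queue = connection.SimpleQueue(queue_name)
        for __ in range(count):
            try:
                message = queue.get_nowait()
            except Empty:
                # Stop early when the queue has no more messages.
                break
            messages.append(message.payload)
            message.ack()
        queue.close()
    return messages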
Example #4
    def test_read_messages_from_queue_stops_at_limit(self, mock_boto3):
        """Test that all messages are read from a message queue."""
        queue_name = 'Test Queue'
        requested_count = util.SQS_RECEIVE_BATCH_SIZE - 1
        actual_count = util.SQS_RECEIVE_BATCH_SIZE + 1

        messages, __, wrapped_messages = self.create_messages(actual_count)
        mock_sqs = mock_boto3.client.return_value
        mock_sqs.receive_message = Mock()
        mock_sqs.receive_message.side_effect = [
            {'Messages': wrapped_messages[:requested_count]},
            {'Messages': []},
        ]
        read_messages = util.read_messages_from_queue(queue_name,
                                                      requested_count)
        self.assertEqual(set(read_messages), set(messages[:requested_count]))
Example #5
def scale_up_inspection_cluster():
    """
    Scale up the "houndigrade" inspection cluster.

    Returns:
        None: Run as a scheduled Celery task.

    """
    scaled_down, auto_scaling_group = aws.is_scaled_down(
        settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME
    )
    if not scaled_down:
        # Quietly exit and let a future run check the scaling.
        args = {
            'name': settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME,
            'min_size': auto_scaling_group.get('MinSize'),
            'max_size': auto_scaling_group.get('MaxSize'),
            'desired_capacity': auto_scaling_group.get('DesiredCapacity'),
            'len_instances': len(auto_scaling_group.get('Instances', []))
        }
        logger.info(_('Auto Scaling group "%(name)s" is not scaled down. '
                      'MinSize=%(min_size)s MaxSize=%(max_size)s '
                      'DesiredCapacity=%(desired_capacity)s '
                      'len(Instances)=%(len_instances)s'), args)
        for instance in auto_scaling_group.get('Instances', []):
            logger.info(_('Instance exists: %s'), instance.get('InstanceId'))
        return

    messages = read_messages_from_queue(
        'ready_volumes',
        settings.HOUNDIGRADE_AWS_VOLUME_BATCH_SIZE
    )

    if len(messages) == 0:
        # Quietly exit and let a future run check for messages.
        logger.info(_('Not scaling up because no new volumes were found.'))
        return

    try:
        aws.scale_up(settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME)
    except ClientError:
        # If scale_up fails unexpectedly, requeue messages so they aren't lost.
        add_messages_to_queue('ready_volumes', messages)
        raise

    run_inspection_cluster.delay(messages)
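
The except branch above requeues unprocessed volume messages via add_messages_to_queue, which is not shown here. A minimal sketch of such a helper, under the same kombu SimpleQueue and broker-setting assumptions as the earlier read sketch, could look like this.

# Hypothetical sketch; mirrors the read helper above and shares its assumptions.
import kombu
from django.conf import settings


def add_messages_to_queue(queue_name, messages):
    """Put each message back onto the named queue."""
    with kombu.Connection(settings.CELERY_BROKER_URL) as connection:
        queue = connection.SimpleQueue(queue_name)
        for message in messages:
            # SimpleQueue serializes the payload on put.
            queue.put(message)
        queue.close()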
Example #6
    def test_read_messages_from_queue_stops_has_error(self, mock_boto3):
        """Test we log if an error is raised when deleting from a queue."""
        queue_name = 'Test Queue'
        requested_count = util.SQS_RECEIVE_BATCH_SIZE - 1
        actual_count = util.SQS_RECEIVE_BATCH_SIZE + 1

        messages, __, wrapped_messages = self.create_messages(actual_count)
        mock_sqs = mock_boto3.client.return_value
        mock_sqs.receive_message = Mock()
        mock_sqs.receive_message.side_effect = [
            {'Messages': wrapped_messages[:requested_count]},
            {'Messages': []},
        ]
        error_response = {
            'Error': {
                'Code': 'it is a mystery'
            }
        }
        exception = ClientError(error_response, Mock())
        mock_sqs.delete_message.side_effect = exception
        read_messages = util.read_messages_from_queue(queue_name,
                                                      requested_count)
        self.assertEqual(set(read_messages), set())
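
Examples #4 and #6 exercise an SQS-backed variant of read_messages_from_queue through a mocked boto3 client. A rough sketch of an implementation consistent with those tests follows; the queue-URL lookup, message-body handling, and logging wording are assumptions inferred from the mocks, not the project's actual code.

# Hypothetical sketch; details are inferred from the mocked tests above.
import logging

import boto3
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)
SQS_RECEIVE_BATCH_SIZE = 10  # SQS caps a single receive call at 10 messages.


def read_messages_from_queue(queue_name, max_count=1):
    """Read up to ``max_count`` messages, deleting each one after it is read."""
    sqs = boto3.client('sqs')
    queue_url = sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
    messages = []
    while len(messages) < max_count:
        batch_size = min(SQS_RECEIVE_BATCH_SIZE, max_count - len(messages))
        response = sqs.receive_message(
            QueueUrl=queue_url, MaxNumberOfMessages=batch_size)
        wrapped_messages = response.get('Messages', [])
        if not wrapped_messages:
            break
        for wrapped_message in wrapped_messages:
            try:
                sqs.delete_message(
                    QueueUrl=queue_url,
                    ReceiptHandle=wrapped_message['ReceiptHandle'],
                )
            except ClientError as error:
                # Log and skip messages that cannot be deleted (see example #6).
                logger.error('Could not delete message from queue: %s', error)
                continue
            messages.append(wrapped_message['Body'])
    return messages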