Example #1
0
    def test_add_messages_to_queue(self, mock_kombu):
        """Verify generated AMI messages are published via kombu."""
        queue_name = 'Test Queue'
        region = random.choice(util_helper.SOME_AWS_REGIONS)
        instance = util_helper.generate_dummy_describe_instance()

        # Build the messages exactly the way production code would.
        messages = util.generate_aws_ami_messages(
            {region: [instance]}, [instance['ImageId']]
        )

        # The mocked kombu objects we expect to see in the publish call.
        expected_exchange = mock_kombu.Exchange.return_value
        expected_queue = mock_kombu.Queue.return_value
        connection = mock_kombu.Connection.return_value
        producer = connection.__enter__.return_value.Producer.return_value

        util.add_messages_to_queue(queue_name, messages)

        producer.publish.assert_called_with(
            messages[0],
            retry=True,
            exchange=expected_exchange,
            routing_key=queue_name,
            declare=[expected_queue],
        )
Example #2
0
    def test_add_messages_to_queue(self, mock_boto3):
        """Verify messages are wrapped and batch-sent to the SQS queue."""
        queue_name = 'Test Queue'
        messages, wrapped_messages, __ = self.create_messages()

        # Mock out the SQS client and its queue-URL lookup.
        mock_queue_url = Mock()
        mock_sqs = mock_boto3.client.return_value
        mock_sqs.get_queue_url.return_value = {'QueueUrl': mock_queue_url}

        with patch.object(util, '_sqs_wrap_message') as mock_wrap:
            mock_wrap.return_value = wrapped_messages[0]
            util.add_messages_to_queue(queue_name, messages)
            mock_wrap.assert_called_once_with(messages[0])

        mock_sqs.send_message_batch.assert_called_with(
            QueueUrl=mock_queue_url,
            Entries=wrapped_messages,
        )
Example #3
0
def enqueue_ready_volume(ami_id, volume_id, volume_region):
    """
    Enqueues information about an AMI and volume for later use.

    Args:
        ami_id (str): The AWS AMI id for which this request originated
        volume_id (str): The id of the volume that must be ready
        volume_region (str): The region of the volume
    Returns:
        None: Run as an asynchronous Celery task.
    """
    # Raises (via check_volume_state) if the volume is not yet usable.
    aws.check_volume_state(aws.get_volume(volume_id, volume_region))

    # Queue name is namespaced by the deployment's AWS prefix.
    add_messages_to_queue(
        '{0}ready_volumes'.format(settings.AWS_NAME_PREFIX),
        [{'ami_id': ami_id, 'volume_id': volume_id}],
    )
Example #4
0
def scale_up_inspection_cluster():
    """
    Scale up the "houndigrade" inspection cluster.

    Reads pending volume messages from the "ready_volumes" queue and, if the
    Auto Scaling group is currently scaled down and messages exist, scales the
    group up and dispatches the inspection task with those messages.

    Returns:
        None: Run as a scheduled Celery task.

    Raises:
        ClientError: If the AWS scale-up call fails (messages are requeued
            first so they are not lost).
    """
    scaled_down, auto_scaling_group = aws.is_scaled_down(
        settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME
    )
    if not scaled_down:
        # Quietly exit and let a future run check the scaling.
        args = {
            'name': settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME,
            'min_size': auto_scaling_group.get('MinSize'),
            # Bug fix: this previously read 'MinSize', so the log line
            # reported MinSize twice and never the actual MaxSize.
            'max_size': auto_scaling_group.get('MaxSize'),
            'desired_capacity': auto_scaling_group.get('DesiredCapacity'),
            'len_instances': len(auto_scaling_group.get('Instances', []))
        }
        logger.info(_('Auto Scaling group "%(name)s" is not scaled down. '
                      'MinSize=%(min_size)s MaxSize=%(max_size)s '
                      'DesiredCapacity=%(desired_capacity)s '
                      'len(Instances)=%(len_instances)s'), args)
        for instance in auto_scaling_group.get('Instances', []):
            logger.info(_('Instance exists: %s'), instance.get('InstanceId'))
        return

    messages = read_messages_from_queue(
        'ready_volumes',
        settings.HOUNDIGRADE_AWS_VOLUME_BATCH_SIZE
    )

    if not messages:
        # Quietly exit and let a future run check for messages.
        logger.info(_('Not scaling up because no new volumes were found.'))
        return

    try:
        aws.scale_up(settings.HOUNDIGRADE_AWS_AUTOSCALING_GROUP_NAME)
    except ClientError:
        # If scale_up fails unexpectedly, requeue messages so they aren't lost.
        add_messages_to_queue('ready_volumes', messages)
        raise

    run_inspection_cluster.delay(messages)
Example #5
0
def enqueue_ready_volume(ami_id, volume_id, region):
    """
    Enqueues information about an AMI and volume for later use.

    Args:
        ami_id (str): The AWS AMI id for which this request originated
        volume_id (str): The id of the volume to mount
        region (str): The region the volume is being created in

    Returns:
        None: Run as an asynchronous Celery task.

    """
    # Raises (via check_volume_state) if the volume is not yet usable.
    aws.check_volume_state(aws.get_volume(volume_id, region))

    message = {'ami_id': ami_id, 'volume_id': volume_id}
    add_messages_to_queue('ready_volumes', [message])
Example #6
0
from queue import Empty
import kombu
from django.conf import settings
from django.utils import timezone
from django.utils.translation import gettext as _
from rest_framework.serializers import ValidationError
from account import AWS_PROVIDER_STRING
from account.models import (AwsInstance, AwsInstanceEvent, AwsMachineImage,
                            ImageTag, InstanceEvent)
from util.aws import is_instance_windows
from account.util import _create_exchange_and_queue
from account.util import add_messages_to_queue


# Ad-hoc smoke test: publish one message to a throwaway queue, then drain
# up to max_count messages back out and print them.
queue_name = 'brasmith-test-12345'
add_messages_to_queue(queue_name, [{'foo': 'bar'}])
__, message_queue = _create_exchange_and_queue(queue_name)

max_count = 4
messages = []

with kombu.Connection(settings.CELERY_BROKER_URL) as conn:
    try:
        consumer = conn.SimpleQueue(name=message_queue)
        while max_count > len(messages):
            received = consumer.get_nowait()
            messages.append(received.payload)
            print(received)
            received.ack()
    except Empty:
        # get_nowait raises Empty once the queue is drained.
        print('####### exception!!!')