Example #1
0
def generate_dummy_block_device_mapping(
    device_name=None,
    device_type="Ebs",
    attach_time=None,
    delete_on_termination=True,
    status="attached",
    volume_id=None,
):
    """
    Build a fake BlockDeviceMapping like part of a 'describe instances' response.

    Any argument left as ``None`` is filled in with a generated value; the
    non-None defaults mimic a typical attached EBS volume.

    Args:
        device_name (str): Optional known DeviceName value.
        device_type (str): Optional known device type key for nested status details.
        attach_time (str): Optional known AttachTime value.
        delete_on_termination (bool): Optional known DeleteOnTermination value.
        status (str): Optional known Status.
        volume_id (str): Optional known VolumeId value.

    Returns:
        dict: Well-formed BlockDeviceMapping data structure. Example:
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "AttachTime": "2020-10-08T19:07:23+00:00",
            "DeleteOnTermination": true,
            "Status": "attached",
            "VolumeId": "vol-06c61265cb97c1e1e"
          }
        }

    """
    if device_name is None:
        # Pick an arbitrary device index to derive a plausible device name.
        device_name = misc.generate_device_name(random.randint(0, 100))

    if attach_time is None:
        attach_time = misc.get_now().isoformat()

    if status is None:
        status = random.choice(["attaching", "attached", "detaching"])

    if volume_id is None:
        volume_id = generate_dummy_volume_id()

    device_details = {
        "AttachTime": attach_time,
        "DeleteOnTermination": delete_on_termination,
        "Status": status,
        "VolumeId": volume_id,
    }
    return {"DeviceName": device_name, device_type: device_details}
Example #2
0
    def test_attach_volumes_to_cluster_success(
        self, mock_boto3, mock_run_inspection_cluster
    ):
        """
        Assert successful starting of the houndigrade task.

        Mocks boto3's ECS client and EC2 resource so the task sees exactly
        one ACTIVE container instance backed by a running EC2 instance, then
        verifies the image status advances to INSPECTING, the volume is
        attached at the generated device name, and the follow-up inspection
        task is scheduled with the expected (ami_id, mount_point) pairs.
        """
        ami_id = util_helper.generate_dummy_image_id()
        volume_id = util_helper.generate_dummy_volume_id()
        # The task attaches volumes starting at device index 0.
        device_name = misc.generate_device_name(0)
        expected_ami_mountpoints = [(ami_id, device_name)]

        image = account_helper.generate_image(
            ec2_ami_id=ami_id, status=MachineImage.PENDING
        )

        instance_id = util_helper.generate_dummy_instance_id()
        mock_list_container_instances = {"containerInstanceArns": [instance_id]}
        mock_ec2 = Mock()
        mock_ec2_instance = mock_ec2.Instance.return_value
        # The task requires the container's EC2 instance to be running.
        mock_ec2_instance.state = EC2_INSTANCE_STATE_RUNNING

        mock_ecs = MagicMock()
        mock_ecs.list_container_instances.return_value = mock_list_container_instances

        mock_boto3.client.return_value = mock_ecs
        mock_boto3.resource.return_value = mock_ec2

        mock_ecs.describe_container_instances.return_value = {
            "containerInstances": [{"ec2InstanceId": instance_id}]
        }

        messages = [{"ami_id": ami_id, "volume_id": volume_id}]
        tasks.attach_volumes_to_cluster(messages)

        image.refresh_from_db()

        # Image status should have advanced from PENDING to INSPECTING.
        self.assertEqual(image.status, MachineImage.INSPECTING)

        mock_ecs.list_container_instances.assert_called_once_with(
            cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME, status="ACTIVE"
        )
        mock_ecs.describe_container_instances.assert_called_once_with(
            containerInstances=[instance_id],
            cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME,
        )

        mock_ec2.Volume.assert_called_once_with(volume_id)
        mock_ec2.Volume.return_value.attach_to_instance.assert_called_once_with(
            Device=device_name, InstanceId=instance_id
        )

        # The inspection task must be scheduled asynchronously (delay) with
        # the container's instance id and the attached (ami, mount) pairs.
        mock_run_inspection_cluster.delay.assert_called_once_with(
            instance_id, expected_ami_mountpoints
        )
Example #3
0
 def test_generate_device_name_success(self):
     """Assert device names are generated from index and optional prefix."""
     cases = (
         ((0,), "/dev/xvdba"),
         ((1, "/dev/sd"), "/dev/sdbb"),
     )
     for args, expected in cases:
         self.assertEqual(generate_device_name(*args), expected)
Example #4
0
def run_inspection_cluster(messages, cloud='aws'):
    """
    Run task definition for "houndigrade" on the cluster.

    Args:
        messages (list): A list of dictionary items containing
            meta-data (ami_id, volume_id)
        cloud (str): String key representing what cloud we're inspecting.

    Returns:
        None: Run as an asynchronous Celery task.

    Raises:
        AwsECSInstanceNotReady: if the cluster has no container instance.
        AwsTooManyECSInstances: if the cluster has more than one container
            instance.

    """
    # NOTE(review): image statuses are flipped to INSPECTING *before* the
    # cluster readiness checks below; if those checks raise, the images have
    # already been marked. Confirm this ordering is intentional.
    for message in messages:
        image = AwsMachineImage.objects.get(ec2_ami_id=message['ami_id'])
        image.status = image.INSPECTING
        image.save()

    # Base houndigrade command; volume targets are appended per message below.
    task_command = ['-c', cloud]
    if settings.HOUNDIGRADE_DEBUG:
        task_command.extend(['--debug'])

    ecs = boto3.client('ecs')
    # get ecs container instance id
    result = ecs.list_container_instances(
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME)

    # verify we have our single container instance
    num_instances = len(result['containerInstanceArns'])
    if num_instances == 0:
        raise AwsECSInstanceNotReady
    elif num_instances > 1:
        raise AwsTooManyECSInstances

    result = ecs.describe_container_instances(
        containerInstances=[result['containerInstanceArns'][0]],
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME)
    ec2_instance_id = result['containerInstances'][0]['ec2InstanceId']

    # Obtain boto EC2 Instance
    ec2 = boto3.resource('ec2')
    ec2_instance = ec2.Instance(ec2_instance_id)

    logger.info(_('{0} attaching volumes').format('run_inspection_cluster'))
    # attach volumes
    for index, message in enumerate(messages):
        # Device names are assigned sequentially by message index.
        mount_point = generate_device_name(index)
        volume = ec2.Volume(message['volume_id'])
        logger.info(
            _('{0} attaching volume {1} to instance {2}').format(
                'run_inspection_cluster', message['volume_id'],
                ec2_instance_id))

        volume.attach_to_instance(Device=mount_point,
                                  InstanceId=ec2_instance_id)

        logger.info(
            _('{0} modify volume {1} to auto-delete').format(
                'run_inspection_cluster', message['volume_id']))
        # Configure volumes to delete when instance is scaled down
        ec2_instance.modify_attribute(BlockDeviceMappings=[{
            'DeviceName': mount_point,
            'Ebs': {
                'DeleteOnTermination': True
            }
        }])

        # Each attached volume becomes a houndigrade inspection target.
        task_command.extend(['-t', message['ami_id'], mount_point])

    # create task definition
    result = ecs.register_task_definition(
        family=f'{settings.HOUNDIGRADE_ECS_FAMILY_NAME}',
        containerDefinitions=[{
            'name':
            'Houndigrade',
            'image':
            f'{settings.HOUNDIGRADE_ECS_IMAGE_NAME}:'
            f'{settings.HOUNDIGRADE_ECS_IMAGE_TAG}',
            'cpu':
            0,
            'memoryReservation':
            256,
            'essential':
            True,
            'command':
            task_command,
            # Credentials and queue settings are injected as environment
            # variables for the houndigrade container.
            'environment': [{
                'name': 'AWS_DEFAULT_REGION',
                'value': settings.AWS_SQS_REGION
            }, {
                'name': 'AWS_ACCESS_KEY_ID',
                'value': settings.AWS_SQS_ACCESS_KEY_ID
            }, {
                'name': 'AWS_SECRET_ACCESS_KEY',
                'value': settings.AWS_SQS_SECRET_ACCESS_KEY
            }, {
                'name': 'RESULTS_QUEUE_NAME',
                'value': settings.HOUNDIGRADE_RESULTS_QUEUE_NAME
            }, {
                'name': 'EXCHANGE_NAME',
                'value': settings.HOUNDIGRADE_EXCHANGE_NAME
            }, {
                'name': 'QUEUE_CONNECTION_URL',
                'value': settings.CELERY_BROKER_URL
            }],
            # presumably required for houndigrade to mount volumes — confirm
            'privileged':
            True,
        }],
        requiresCompatibilities=['EC2'])

    # release the hounds
    ecs.run_task(
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME,
        taskDefinition=result['taskDefinition']['taskDefinitionArn'],
    )
Example #5
0
def run_inspection_cluster(messages, cloud='aws'):
    """
    Run task definition for "houndigrade" on the cluster.

    Args:
        messages (list): A list of dictionary items containing
            the ami_id and volume_id to be inspected.
        cloud (str): String key representing what cloud we're inspecting.

    Returns:
        None: Run as an asynchronous Celery task.

    """
    # Base houndigrade command; inspection targets are appended per message.
    task_command = ['-c', cloud]
    if settings.HOUNDIGRADE_DEBUG:
        task_command.append('--debug')

    # Find the cluster's single container instance.
    ecs = boto3.client('ecs')
    listed = ecs.list_container_instances(
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME)

    # The cluster must hold exactly one container instance.
    instance_arns = listed['containerInstanceArns']
    if not instance_arns:
        raise AwsECSInstanceNotReady
    if len(instance_arns) > 1:
        raise AwsTooManyECSInstances

    described = ecs.describe_container_instances(
        containerInstances=[instance_arns[0]],
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME
    )
    instance_id = described['containerInstances'][0]['ec2InstanceId']

    # Attach each volume at a sequential device name and record it as a
    # houndigrade inspection target.
    ec2 = boto3.resource('ec2')
    for position, message in enumerate(messages):
        device = generate_device_name(position)
        ec2.Volume(message['volume_id']).attach_to_instance(
            Device=device, InstanceId=instance_id)
        task_command += ['-t', message['ami_id'], device]

    # Register a fresh task definition carrying the assembled command.
    registered = ecs.register_task_definition(
        family=f'{settings.HOUNDIGRADE_ECS_FAMILY_NAME}',
        containerDefinitions=[
            {
                'name': 'Houndigrade',
                'image': f'{settings.HOUNDIGRADE_ECS_IMAGE_NAME}:'
                         f'{settings.HOUNDIGRADE_ECS_IMAGE_TAG}',
                'cpu': 0,
                'memoryReservation': 256,
                'essential': True,
                'command': task_command,
                'environment': [
                    {
                        'name': 'RESULTS_QUEUE_NAME',
                        'value': settings.HOUNDIGRADE_RESULTS_QUEUE_NAME
                    },
                    {
                        'name': 'EXCHANGE_NAME',
                        'value': settings.HOUNDIGRADE_EXCHANGE_NAME
                    },
                    {
                        'name': 'QUEUE_CONNECTION_URL',
                        'value': settings.CELERY_BROKER_URL
                    }
                ],
                'privileged': True,
            }
        ],
        requiresCompatibilities=['EC2']
    )

    # release the hounds
    ecs.run_task(
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME,
        taskDefinition=registered['taskDefinition']['taskDefinitionArn'],
    )
Example #6
0
def attach_volumes_to_cluster(messages):  # noqa: C901
    """
    Ensure houndigrade cluster instance is ready and attach volumes for inspection.

    Args:
        messages (list): list of dicts describing images to inspect. items look like
            {"ami_id": "ami-1234567890", "volume_id": "vol-1234567890"}

    Returns:
        None: Run as an asynchronous Celery task.

    Raises:
        AwsECSInstanceNotReady: if the cluster has no ACTIVE container
            instance or its EC2 instance is not in a running state.
        AwsTooManyECSInstances: if the cluster has more than one ACTIVE
            container instance.

    """
    log_prefix = "attach_volumes_to_cluster"
    relevant_messages = _filter_messages_for_inspection(messages)

    if not relevant_messages:
        # Early return if nothing actually needs inspection.
        # The cluster has likely scaled up now and needs to scale down.
        scale_down_cluster.delay()
        return

    ecs = boto3.client("ecs")
    # get ecs container instance id
    result = ecs.list_container_instances(
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME, status="ACTIVE")

    # verify we have our single container instance
    num_instances = len(result["containerInstanceArns"])
    if num_instances == 0:
        raise AwsECSInstanceNotReady
    elif num_instances > 1:
        raise AwsTooManyECSInstances

    result = ecs.describe_container_instances(
        containerInstances=[result["containerInstanceArns"][0]],
        cluster=settings.HOUNDIGRADE_ECS_CLUSTER_NAME,
    )
    ec2_instance_id = result["containerInstances"][0]["ec2InstanceId"]

    # Obtain boto EC2 Instance
    ec2 = boto3.resource("ec2")
    ec2_instance = ec2.Instance(ec2_instance_id)
    # An ACTIVE ECS container instance can still be backed by an EC2 instance
    # that is not yet (or no longer) running; treat that as "not ready".
    current_state = ec2_instance.state
    if not aws.InstanceState.is_running(current_state["Code"]):
        logger.warning(
            _("ECS cluster %(cluster)s has container instance %(ec2_instance_id)s "
              "but instance state is %(current_state)s"),
            {
                "cluster": settings.HOUNDIGRADE_ECS_CLUSTER_NAME,
                "ec2_instance_id": ec2_instance_id,
                "current_state": current_state,
            },
        )
        raise AwsECSInstanceNotReady

    logger.info(_("%s attaching volumes"), log_prefix)

    # attach volumes and track which AMI is used at which mount point
    ami_mountpoints = []
    for index, message in enumerate(relevant_messages):
        ec2_ami_id = message["ami_id"]
        ec2_volume_id = message["volume_id"]
        # Device names are assigned sequentially by message index.
        mount_point = generate_device_name(index)
        volume = ec2.Volume(ec2_volume_id)
        logger.info(
            _("%(label)s attaching volume %(volume_id)s from AMI "
              "%(ami_id)s to instance %(instance)s at %(mount_point)s"),
            {
                "label": log_prefix,
                "volume_id": ec2_volume_id,
                "ami_id": ec2_ami_id,
                "instance": ec2_instance_id,
                "mount_point": mount_point,
            },
        )
        try:
            volume.attach_to_instance(Device=mount_point,
                                      InstanceId=ec2_instance_id)
        except ClientError as e:
            error_code = e.response.get("Error").get("Code")
            error_message = e.response.get("Error").get("Message")

            # Marketplace AMIs cannot be inspected this way; mark them as
            # already inspected instead of erroring the image.
            if (error_code in ("OptInRequired", "IncorrectInstanceState")
                    and "marketplace" in error_message.lower()):
                logger.info(
                    _('Found a marketplace AMI "%s" when trying to copy '
                      "volume. This should not happen, but here we are."),
                    ec2_ami_id,
                )
                save_success = update_aws_image_status_inspected(ec2_ami_id)
            else:
                # Any other attach failure puts the image into ERROR state.
                logger.error(
                    _("Encountered an issue when trying to attach "
                      'volume "%(volume_id)s" from AMI "%(ami_id)s" '
                      "to inspection instance. Error code: "
                      '"%(error_code)s". Error message: '
                      '"%(error_message)s". Setting image state to '
                      "ERROR."),
                    {
                        "volume_id": ec2_volume_id,
                        "ami_id": ec2_ami_id,
                        "error_code": error_code,
                        "error_message": error_message,
                    },
                )
                save_success = update_aws_image_status_error(ec2_ami_id)

            if not save_success:
                logger.warning(
                    _("Failed to save updated status to AwsMachineImage for "
                      "EC2 AMI ID %(ec2_ami_id)s in run_inspection_cluster"),
                    {"ec2_ami_id": ec2_ami_id},
                )

            # The volume could not be attached; delete it and move on to the
            # next message rather than aborting the whole batch.
            volume.delete()
            continue

        logger.info(
            _("%(label)s modify volume %(volume_id)s to auto-delete"),
            {
                "label": log_prefix,
                "volume_id": ec2_volume_id
            },
        )
        # Configure volumes to delete when instance is scaled down
        ec2_instance.modify_attribute(BlockDeviceMappings=[{
            "DeviceName": mount_point,
            "Ebs": {
                "DeleteOnTermination": True
            },
        }])

        ami_mountpoints.append((message["ami_id"], mount_point))

    if not ami_mountpoints:
        # Every attach failed; nothing to hand off to the inspection task.
        logger.warning(_("No targets left to inspect, exiting early."))
        return

    run_inspection_cluster.delay(ec2_instance_id, ami_mountpoints)