Example #1
def test_collector(historical_role, mock_lambda_environment, historical_sqs,
                   security_groups, current_security_group_table):
    from historical.security_group.models import CurrentSecurityGroupModel
    from historical.security_group.collector import handler
    event = CloudwatchEventFactory(detail=DetailFactory(
        requestParameters={'groupId': security_groups['GroupId']},
        eventName='CreateSecurityGroup'), )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)

    assert CurrentSecurityGroupModel.count() == 1

    event = CloudwatchEventFactory(detail=DetailFactory(
        requestParameters={'groupId': security_groups['GroupId']},
        eventName='DeleteSecurityGroup'), )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)

    assert CurrentSecurityGroupModel.count() == 0
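
Every example here repeats the same wrapping dance: serialize the CloudWatch event, wrap it in an SQS record, then round-trip the whole thing through JSON the way the Lambda runtime would deliver it. Below is a minimal sketch of a shared helper for that pattern; it assumes the factories and serialize come from the project's test helpers (historical.tests.factories), which is an assumption about the checkout rather than something shown above.

import json

# Assumed import path for the test factories used throughout these examples:
from historical.tests.factories import RecordsFactory, SQSDataFactory, serialize


def make_sqs_event(event):
    """Wrap a factory-built CloudWatch event the way these tests hand it to a handler."""
    body = json.dumps(event, default=serialize)
    records = RecordsFactory(records=[SQSDataFactory(body=body)])
    # Round-trip through JSON so the handler sees plain dicts, not factory objects:
    return json.loads(json.dumps(records, default=serialize))


# Usage, mirroring Example #1:
#   data = make_sqs_event(CloudwatchEventFactory(detail=DetailFactory(
#       requestParameters={'groupId': security_groups['GroupId']},
#       eventName='CreateSecurityGroup')))
#   handler(data, None)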
Example #2
def test_collector(historical_role, mock_lambda_environment, vpcs,
                   current_vpc_table):
    from historical.vpc.models import CurrentVPCModel
    from historical.vpc.collector import handler
    event = CloudwatchEventFactory(detail=DetailFactory(
        requestParameters={'vpcId': vpcs['VpcId']}, eventName='CreateVpc'), )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)

    assert CurrentVPCModel.count() == 1

    event = CloudwatchEventFactory(detail=DetailFactory(
        requestParameters={'vpcId': vpcs['VpcId']}, eventName='DeleteVpc'), )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)

    assert CurrentVPCModel.count() == 0
Example #3
def test_collector(historical_role, buckets, mock_lambda_environment,
                   swag_accounts, current_s3_table):
    from historical.s3.collector import handler

    now = datetime.utcnow().replace(tzinfo=None, microsecond=0)
    create_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             eventSource="aws.s3",
                             eventName="CreateBucket",
                             eventTime=now))
    data = json.dumps(create_event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result) == 1
    # Verify that the tags are duplicated in the top level and configuration:
    assert len(result[0].Tags.attribute_values) == len(
        result[0].configuration.attribute_values["Tags"]) == 1
    assert result[0].Tags.attribute_values["theBucketName"] == \
           result[0].configuration.attribute_values["Tags"]["theBucketName"] == "testbucket1"  # noqa
    assert result[0].eventSource == "aws.s3"

    # Polling (make sure the date is included):
    polling_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={
            "bucketName": "testbucket1",
            "creationDate": now
        },
                             eventSource="historical.s3.poller",
                             eventName="DescribeBucket",
                             eventTime=now))
    data = json.dumps(polling_event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    assert CurrentS3Model.count() == 1

    # Load the config and verify the polling timestamp is in there:
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert result[0].configuration["CreationDate"] == now.isoformat() + "Z"
    assert result[0].eventSource == "historical.s3.poller"

    # And deletion:
    delete_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             eventSource="aws.s3",
                             eventName="DeleteBucket",
                             eventTime=now))
    data = json.dumps(delete_event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)
    handler(data, mock_lambda_environment)
    assert CurrentS3Model.count() == 0
def test_poller_processor_handler(historical_sqs, historical_role, mock_lambda_environment, security_groups, swag_accounts):
    """Test the Poller's processing component that tasks the collector."""
    # Mock this so it returns a `NextToken`:
    def mock_describe_security_groups(**kwargs):
        from cloudaux.aws.ec2 import describe_security_groups

        # Did we receive a NextToken? (This happens on the second run-through, verifying that
        # this logic is actually reached.)
        if kwargs.get('NextToken'):
            assert kwargs['NextToken'] == 'MOARRESULTS'

        result = describe_security_groups(**kwargs)
        result['NextToken'] = 'MOARRESULTS'

        return result

    patch_sgs = patch('historical.security_group.poller.describe_security_groups', mock_describe_security_groups)
    patch_sgs.start()

    from historical.security_group.poller import poller_processor_handler as handler
    from historical.common import cloudwatch

    # Create the events and SQS records:
    messages = make_poller_events()
    event = json.loads(json.dumps(RecordsFactory(records=messages), default=serialize))

    # Run the poller handler:
    handler(event, mock_lambda_environment)

    # Need to ensure that 3 total SGs were added into SQS:
    sqs = boto3.client("sqs", region_name="us-east-1")
    queue_url = get_queue_url(os.environ['POLLER_QUEUE_NAME'])

    messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)['Messages']
    assert len(messages) == 3

    # Verify that the region is properly propagated through, and that we got the collected data:
    for msg in messages:
        body = json.loads(msg['Body'])
        assert cloudwatch.get_region(body) == 'us-east-1'
        assert body['detail']['collected']['OwnerId'] == '123456789012'
        assert not body['detail']['collected'].get('ResponseMetadata')

    # Now, verify that the pagination was sent in properly to SQS tasker queue:
    queue_url = get_queue_url(os.environ['POLLER_TASKER_QUEUE_NAME'])
    messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)['Messages']
    assert len(messages) == 1
    assert json.loads(messages[0]['Body'])['NextToken'] == 'MOARRESULTS'

    # Re-run the poller:
    messages[0]['body'] = messages[0]['Body']   # Need to change the casing
    handler({'Records': messages}, mock_lambda_environment)

    patch_sgs.stop()
def test_poller_processor_handler(historical_sqs, historical_role, mock_lambda_environment, security_groups, swag_accounts):
    from historical.security_group.poller import poller_processor_handler as handler

    # Create the events and SQS records:
    messages = make_poller_events()
    event = json.loads(json.dumps(RecordsFactory(records=messages), default=serialize))

    # Run the poller:
    handler(event, mock_lambda_environment)

    # Need to ensure that 3 total SGs were added into SQS:
    sqs = boto3.client("sqs", region_name="us-east-1")
    queue_url = get_queue_url(os.environ['POLLER_QUEUE_NAME'])

    messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)['Messages']
    assert len(messages) == 3
Example #6
def test_collector_on_deleted_bucket(historical_role, buckets,
                                     mock_lambda_environment, swag_accounts,
                                     current_s3_table):
    from historical.s3.collector import handler

    # If an event arrives for a bucket that has already been deleted, the collector should
    # skip it and wait until the deletion event arrives.
    create_event = CloudwatchEventFactory(detail=DetailFactory(
        requestParameters={"bucketName": "not-a-bucket"},
        eventSource="aws.s3",
        eventName="PutBucketPolicy",
    ))
    create_event_data = json.dumps(create_event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=create_event_data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    assert CurrentS3Model.count() == 0
def test_differ(current_security_group_table, durable_security_group_table, mock_lambda_environment):
    from historical.security_group.models import DurableSecurityGroupModel
    from historical.security_group.differ import handler
    from historical.models import TTL_EXPIRY

    ttl = int(time.time() + TTL_EXPIRY)
    new_group = SECURITY_GROUP.copy()
    new_group.pop("eventSource")
    new_group['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
    new_group["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=new_group,
        Keys={
            'arn': new_group['arn']
        }
    ), eventName='INSERT'), default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=data), default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableSecurityGroupModel.count() == 1

    # ensure no new record for the same data
    duplicate_group = SECURITY_GROUP.copy()
    duplicate_group.pop("eventSource")
    duplicate_group['eventTime'] = datetime(year=2017, month=5, day=12, hour=11, minute=30, second=0).isoformat() + 'Z'
    duplicate_group["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=duplicate_group,
        Keys={
            'arn': duplicate_group['arn']
        }
    ), eventName='MODIFY'), default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=data), default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableSecurityGroupModel.count() == 1

    updated_group = SECURITY_GROUP.copy()
    updated_group.pop("eventSource")
    updated_group['eventTime'] = datetime(year=2017, month=5, day=12, hour=11, minute=30, second=0).isoformat() + 'Z'
    updated_group['configuration']['Description'] = 'changeme'
    updated_group["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=updated_group,
        Keys={
            'arn': SECURITY_GROUP['arn']
        }
    ), eventName='MODIFY'), default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=data), default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableSecurityGroupModel.count() == 2

    updated_group = SECURITY_GROUP.copy()
    updated_group.pop("eventSource")
    updated_group['eventTime'] = datetime(year=2017, month=5, day=12, hour=9, minute=30, second=0).isoformat() + 'Z'
    updated_group['configuration']['IpPermissions'][0]['IpRanges'][0]['CidrIp'] = 'changeme'
    updated_group["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=updated_group,
        Keys={
            'arn': SECURITY_GROUP['arn']
        }
    ), eventName='MODIFY'), default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=data), default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableSecurityGroupModel.count() == 3

    deleted_group = SECURITY_GROUP.copy()
    deleted_group.pop("eventSource")
    deleted_group['eventTime'] = datetime(year=2017, month=5, day=12, hour=12, minute=30, second=0).isoformat() + 'Z'
    deleted_group["ttl"] = ttl

    # ensure new record
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        OldImage=deleted_group,
        Keys={
            'arn': SECURITY_GROUP['arn']
        }),
        eventName='REMOVE',
        userIdentity=UserIdentityFactory(
                type='Service',
                principalId='dynamodb.amazonaws.com'
        )), default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=data), default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableSecurityGroupModel.count() == 4
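
The assertions above rely on the differ writing a new durable revision only when something non-ephemeral actually changes: the duplicate MODIFY with a newer eventTime leaves the count at 1, while the Description and CidrIp changes each add a revision. A hypothetical sketch of that decision follows; the set of ignored fields is illustrative, not the project's actual list.

# Illustrative only -- bookkeeping fields that should not, by themselves,
# trigger a new durable revision (an assumption, not the project's real config):
EPHEMERAL_FIELDS = {"eventTime", "ttl"}


def is_new_revision(old_image: dict, new_image: dict) -> bool:
    """Return True when the two snapshots differ in anything non-ephemeral."""
    def strip(image):
        return {k: v for k, v in image.items() if k not in EPHEMERAL_FIELDS}
    return strip(old_image) != strip(new_image)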
Example #8
def test_differ(current_vpc_table, durable_vpc_table, mock_lambda_environment):
    from historical.vpc.models import DurableVPCModel
    from historical.vpc.differ import handler
    from historical.models import TTL_EXPIRY

    ttl = int(time.time() + TTL_EXPIRY)
    new_vpc = VPC.copy()
    new_vpc.pop("eventSource")
    new_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=10, minute=30,
        second=0).isoformat() + 'Z'
    new_vpc['ttl'] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=new_vpc, Keys={'arn': new_vpc['arn']}),
                                            eventName='INSERT'),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 1

    # ensure no new record for the same data
    duplicate_vpc = VPC.copy()
    duplicate_vpc.pop("eventSource")
    duplicate_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=11, minute=30,
        second=0).isoformat() + 'Z'
    duplicate_vpc['ttl'] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=duplicate_vpc, Keys={'arn': duplicate_vpc['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 1

    updated_vpc = VPC.copy()
    updated_vpc.pop("eventSource")
    updated_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=11, minute=30,
        second=0).isoformat() + 'Z'
    updated_vpc['configuration']['State'] = 'changeme'
    updated_vpc['ttl'] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=updated_vpc, Keys={'arn': VPC['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 2

    updated_vpc = VPC.copy()
    updated_vpc.pop("eventSource")
    updated_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=9, minute=30,
        second=0).isoformat() + 'Z'
    updated_vpc['configuration']['CidrBlock'] = 'changeme'
    updated_vpc['ttl'] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=updated_vpc, Keys={'arn': VPC['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 3

    updated_vpc = VPC.copy()
    updated_vpc.pop("eventSource")
    updated_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=9, minute=31,
        second=0).isoformat() + 'Z'
    updated_vpc.update({'Name': 'blah'})
    updated_vpc['ttl'] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=updated_vpc, Keys={'arn': VPC['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 4

    deleted_vpc = VPC.copy()
    deleted_vpc.pop("eventSource")
    deleted_vpc['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=12, minute=30,
        second=0).isoformat() + 'Z'
    deleted_vpc['ttl'] = ttl

    # ensure new record
    data = json.dumps(DynamoDBRecordFactory(
        dynamodb=DynamoDBDataFactory(OldImage=deleted_vpc,
                                     Keys={'arn': VPC['arn']}),
        eventName='REMOVE',
        userIdentity=UserIdentityFactory(
            type='Service', principalId='dynamodb.amazonaws.com')),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, None)
    assert DurableVPCModel.count() == 5
Example #9
def test_snsproxy_dynamodb_differ(historical_role, current_s3_table,
                                  durable_s3_table, mock_lambda_environment,
                                  buckets):
    """
    This mostly checks that the differ is able to properly load the reduced dataset from the SNSProxy.
    """
    # Create the item in the current table:
    from historical.s3.collector import handler as current_handler
    from historical.s3.differ import handler as diff_handler
    from historical.s3.models import CurrentS3Model, DurableS3Model
    from historical.common.sns import shrink_sns_blob

    # Mock out the logger:
    import historical.common.dynamodb
    old_logger = historical.common.dynamodb.log
    mocked_logger = MagicMock()
    historical.common.dynamodb.log = mocked_logger

    now = datetime.utcnow().replace(tzinfo=None, microsecond=0)
    create_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             eventSource="aws.s3",
                             eventName="CreateBucket",
                             eventTime=now))
    data = json.dumps(create_event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    current_handler(data, mock_lambda_environment)
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result) == 1

    # Mock out the DDB Stream for this creation and for an item that is NOT in the current table:
    ttl = int(time.time() + TTL_EXPIRY)
    new_bucket = S3_BUCKET.copy()
    new_bucket['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=10, minute=30,
        second=0).isoformat() + 'Z'
    new_bucket['ttl'] = ttl
    ddb_existing_item = DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=new_bucket,
        Keys={'arn': new_bucket['arn']},
        OldImage=new_bucket),
                                              eventName='INSERT')

    missing_bucket = S3_BUCKET.copy()
    missing_bucket['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=10, minute=30,
        second=0).isoformat() + 'Z'
    missing_bucket['ttl'] = ttl
    missing_bucket['BucketName'] = 'notinthecurrenttable'
    missing_bucket['arn'] = 'arn:aws:s3:::notinthecurrenttable'
    missing_bucket['configuration']['Name'] = 'notinthecurrenttable'
    ddb_missing_item = DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=missing_bucket,
        Keys={'arn': 'arn:aws:s3:::notinthecurrenttable'},
        OldImage=new_bucket),
                                             eventName='INSERT')

    # Get the shrunken blob:
    shrunken_existing = json.dumps(
        shrink_sns_blob(
            json.loads(json.dumps(ddb_existing_item, default=serialize))))
    shrunken_missing = json.dumps(
        shrink_sns_blob(
            json.loads(json.dumps(ddb_missing_item, default=serialize))))

    records = RecordsFactory(records=[
        SQSDataFactory(body=json.dumps(
            SnsDataFactory(Message=shrunken_existing), default=serialize)),
        SQSDataFactory(body=json.dumps(
            SnsDataFactory(Message=shrunken_missing), default=serialize))
    ])
    records_event = json.loads(json.dumps(records, default=serialize))

    # Run the differ:
    diff_handler(records_event, mock_lambda_environment)

    # Verify that the existing bucket in the Current table is in the Durable table with the correct configuration:
    result = list(DurableS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result) == 1
    assert result[0].configuration.attribute_values['Name'] == 'testbucket1'

    # Verify that the missing bucket is ignored -- it will presumably be processed later:
    result = list(DurableS3Model.query("arn:aws:s3:::notinthecurrenttable"))
    assert not result

    # Verify that the proper log statements were reached:
    assert mocked_logger.debug.called
    assert mocked_logger.error.called
    debug_calls = [
        '[-->] Item with ARN: arn:aws:s3:::notinthecurrenttable was too big for SNS '
        '-- fetching it from the Current table...',
        '[+] Saving new revision to durable table.',
        '[-->] Item with ARN: arn:aws:s3:::testbucket1 was too big for SNS -- fetching it from the Current table...'
    ]
    for dc in debug_calls:
        mocked_logger.debug.assert_any_call(dc)

    mocked_logger.error.assert_called_once_with(
        '[?] Received item too big for SNS, and was not able to '
        'find the original item with ARN: arn:aws:s3:::notinthecurrenttable')

    # Unmock the logger:
    historical.common.dynamodb.log = old_logger
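
The test above depends on shrink_sns_blob reducing an oversized DynamoDB stream record to just enough for the differ to re-fetch the full item from the Current table (SNS caps message size, so large configurations cannot travel through it whole). A hypothetical sketch of that idea follows; the real shrink_sns_blob lives in historical.common.sns and may keep different fields.

def shrink_blob_sketch(record: dict) -> dict:
    """Keep only what a differ would need to look the item up again (illustrative)."""
    return {
        "eventName": record["eventName"],
        "sns_too_big": True,  # assumed marker telling the differ to re-fetch from the Current table
        "dynamodb": {"Keys": record["dynamodb"]["Keys"]},
    }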
Example #10
def test_differ(current_s3_table, durable_s3_table, mock_lambda_environment):
    from historical.s3.models import DurableS3Model
    from historical.s3.differ import handler
    from historical.models import TTL_EXPIRY

    ttl = int(time.time() + TTL_EXPIRY)
    new_bucket = S3_BUCKET.copy()
    new_bucket['eventTime'] = datetime(
        year=2017, month=5, day=12, hour=10, minute=30,
        second=0).isoformat() + 'Z'
    new_bucket["ttl"] = ttl
    ddb_record = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=new_bucket, Keys={'arn': new_bucket['arn']}),
                                                  eventName='INSERT'),
                            default=serialize)

    new_item = RecordsFactory(records=[
        SQSDataFactory(body=json.dumps(ddb_record, default=serialize))
    ])
    data = json.loads(json.dumps(new_item, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableS3Model.count() == 1

    # Test duplicates don't change anything:
    data = json.loads(json.dumps(new_item, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableS3Model.count() == 1

    # Test ephemeral changes don't add new models:
    ephemeral_changes = S3_BUCKET.copy()
    ephemeral_changes["eventTime"] = \
        datetime(year=2017, month=5, day=12, hour=11, minute=30, second=0).isoformat() + 'Z'
    ephemeral_changes["configuration"]["_version"] = 99999
    ephemeral_changes["ttl"] = ttl

    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=ephemeral_changes, Keys={'arn': ephemeral_changes['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)

    data = RecordsFactory(
        records=[SQSDataFactory(body=json.dumps(data, default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableS3Model.count() == 1

    # Add an update:
    new_changes = S3_BUCKET.copy()
    new_date = datetime(
        year=2017, month=5, day=12, hour=11, minute=30,
        second=0).isoformat() + 'Z'
    new_changes["eventTime"] = new_date
    new_changes["Tags"] = {"ANew": "Tag"}
    new_changes["configuration"]["Tags"] = {"ANew": "Tag"}
    new_changes["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
        NewImage=new_changes, Keys={'arn': new_changes['arn']}),
                                            eventName='MODIFY'),
                      default=serialize)
    data = RecordsFactory(
        records=[SQSDataFactory(body=json.dumps(data, default=serialize))])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    results = list(DurableS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(results) == 2
    assert results[1].Tags["ANew"] == \
        results[1].configuration.attribute_values["Tags"]["ANew"] == "Tag"
    assert results[1].eventTime == new_date

    # And deletion (ensure a new record is created -- testing TTL), this time wrapped in SNS to test that path:
    delete_bucket = S3_BUCKET.copy()
    delete_bucket["eventTime"] = datetime(
        year=2017, month=5, day=12, hour=12, minute=30,
        second=0).isoformat() + 'Z'
    delete_bucket["ttl"] = ttl
    data = json.dumps(DynamoDBRecordFactory(
        dynamodb=DynamoDBDataFactory(OldImage=delete_bucket,
                                     Keys={'arn': delete_bucket['arn']}),
        eventName='REMOVE',
        userIdentity=UserIdentityFactory(
            type='Service', principalId='dynamodb.amazonaws.com')),
                      default=serialize)
    data = RecordsFactory(records=[
        SQSDataFactory(
            body=json.dumps(SnsDataFactory(Message=data), default=serialize))
    ])
    data = json.loads(json.dumps(data, default=serialize))
    handler(data, mock_lambda_environment)
    assert DurableS3Model.count() == 3
Example #11
def test_poller_processor_handler(historical_role, buckets,
                                  mock_lambda_environment, historical_sqs,
                                  swag_accounts):
    from historical.s3.poller import poller_processor_handler as handler

    # Create the events and SQS records:
    messages = make_poller_events()
    event = json.loads(
        json.dumps(RecordsFactory(records=messages), default=serialize))

    # Run the poller:
    handler(event, None)

    # Need to ensure that 51 total buckets were added into SQS:
    sqs = boto3.client("sqs", region_name="us-east-1")
    queue_url = get_queue_url(os.environ['POLLER_QUEUE_NAME'])

    all_buckets = {"SWAG": True}
    for i in range(0, 50):
        all_buckets["testbucket{}".format(i)] = True

    # Loop through the queue and make sure all buckets are accounted for:
    for i in range(0, 6):
        messages = sqs.receive_message(QueueUrl=queue_url,
                                       MaxNumberOfMessages=10)['Messages']
        message_ids = []

        for m in messages:
            message_ids.append({
                "Id": m['MessageId'],
                "ReceiptHandle": m['ReceiptHandle']
            })
            data = s3_polling_schema.loads(m['Body']).data

            assert all_buckets[
                data["detail"]["request_parameters"]["bucket_name"]]
            assert datetime.strptime(
                data["detail"]["request_parameters"]["creation_date"],
                '%Y-%m-%dT%H:%M:%SZ')
            assert data["detail"]["event_source"] == "historical.s3.poller"

            # Remove from the dict (at the end, there should be 0 items left)
            del all_buckets[
                data["detail"]["request_parameters"]["bucket_name"]]

        sqs.delete_message_batch(QueueUrl=queue_url, Entries=message_ids)

    assert len(all_buckets) == 0

    # Check that an exception raised doesn't break things:
    import historical.s3.poller

    def mocked_poller(account, stream):
        raise ClientError({"Error": {
            "Message": "",
            "Code": "AccessDenied"
        }}, "sts:AssumeRole")

    old_method = historical.s3.poller.produce_events  # For pytest inter-test issues...
    historical.s3.poller.produce_events = mocked_poller
    handler(event, None)
    historical.s3.poller.produce_events = old_method
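
The manual save-and-restore of produce_events above works, but the same swap can be written with mock.patch as a context manager so the original function is restored even if the handler raises. A sketch, reusing the handler and event already built in this test:

from unittest.mock import patch

from botocore.exceptions import ClientError

access_denied = ClientError(
    {"Error": {"Message": "", "Code": "AccessDenied"}}, "sts:AssumeRole")

with patch('historical.s3.poller.produce_events', side_effect=access_denied):
    handler(event, None)  # the handler should not raise despite the AccessDenied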
def test_collector(historical_role, mock_lambda_environment, historical_sqs, security_groups,
                   current_security_group_table):
    """Tests the Collector."""
    # This should NOT be called at first:
    def mock_describe_security_groups(**kwargs):
        assert False

    patch_sgs = patch('historical.security_group.collector.describe_security_groups', mock_describe_security_groups)
    patch_sgs.start()

    from historical.security_group.models import CurrentSecurityGroupModel
    from historical.security_group.collector import handler
    from cloudaux.aws.ec2 import describe_security_groups
    sg_details = describe_security_groups(
        account_number='012345678910',
        assume_role='Historical',
        region='us-east-1',
        GroupIds=[security_groups['GroupId']])['SecurityGroups'][0]

    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='PollSecurityGroups',
            collected=sg_details))
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    patch_sgs.stop()
    group = list(CurrentSecurityGroupModel.scan())
    assert len(group) == 1

    # Validate that Tags are correct:
    assert len(group[0].Tags.attribute_values) == 2
    assert group[0].Tags.attribute_values['Some'] == 'Value'
    assert group[0].Tags.attribute_values['Empty'] == '<empty>'
    group[0].delete()

    # Standard SG events:
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='CreateSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)

    group = list(CurrentSecurityGroupModel.scan())
    assert len(group) == 1

    # Validate that Tags are correct:
    assert len(group[0].Tags.attribute_values) == 2
    assert group[0].Tags.attribute_values['Some'] == 'Value'
    assert group[0].Tags.attribute_values['Empty'] == '<empty>'

    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='DeleteSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)

    assert CurrentSecurityGroupModel.count() == 0

    # Try to get it again -- this time, add the SG ID to the responseElements:
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            responseElements={'groupId': security_groups['GroupId']},
            eventName='CreateSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    assert CurrentSecurityGroupModel.count() == 1

    # Create a security group in an off-region. Make sure that the ARN of the Security Group is correct and NOT
    # set to the CURRENT_REGION:
    client = boto3.client('ec2', region_name='eu-west-2')
    sg_id = client.create_security_group(GroupName='London', Description='London', VpcId='vpc-test')['GroupId']
    sg_details = describe_security_groups(
        account_number='123456789012',
        assume_role='Historical',
        region='eu-west-2',
        GroupIds=[sg_id])['SecurityGroups'][0]

    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': sg_id},
            eventName='PollSecurityGroups',
            awsRegion='eu-west-2',
            collected=sg_details))
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    group = list(CurrentSecurityGroupModel.query(f'arn:aws:ec2:eu-west-2:123456789012:security-group/{sg_id}'))
    assert len(group) == 1
def test_collector(historical_role, mock_lambda_environment, {{cookiecutter.technology_slug}}s):
    from .models import Current{{cookiecutter.technology_slug | titlecase}}Model
    from .collector import handler

    # TODO modify event
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={}, # e.g. {'vpcId': vpcs['VpcId']},
            eventName='', # e.g. 'CreateVpc'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(
        records=[
            KinesisRecordFactory(
                kinesis=KinesisDataFactory(data=data))
        ]
    )
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)

    assert Current{{cookiecutter.technology_slug | titlecase}}Model.count() == 1

    # TODO modify delete event
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={}, # e.g. {'vpcId': vpcs['VpcId']},
            eventName='',  # e.g. 'DeleteVpc'
        ),