def test_collector(historical_role, buckets, mock_lambda_environment, swag_accounts, current_s3_table):
    # NOTE(review): this definition is DEAD CODE — a second `def test_collector`
    # appears later in this file, and Python binds the later definition, so
    # pytest never collects or runs this one. It appears to be a stale
    # Kinesis-based variant (the live version uses SQS records and the
    # `eventSource=` kwarg). Confirm and remove, or rename it if the Kinesis
    # path is still supported.
    from historical.s3.collector import handler

    now = datetime.utcnow().replace(tzinfo=None, microsecond=0)

    # Creation: send a CreateBucket CloudWatch event through the Kinesis
    # record envelope and into the collector handler.
    create_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             source="aws.s3",
                             eventName="CreateBucket",
                             eventTime=now))
    data = json.dumps(create_event, default=serialize)
    data = KinesisRecordsFactory(
        records=[KinesisRecordFactory(kinesis=KinesisDataFactory(data=data))])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result) == 1

    # Verify that the tags are duplicated in the top level and configuration:
    assert len(result[0].Tags.attribute_values) == len(
        result[0].configuration.attribute_values["Tags"]) == 1
    assert result[0].Tags.attribute_values["theBucketName"] == \
        result[0].configuration.attribute_values["Tags"]["theBucketName"] == "testbucket1"  # noqa

    # Polling (make sure the date is included):
    polling_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={
            "bucketName": "testbucket1",
            "creationDate": now
        },
            source="aws.s3",
            eventName="DescribeBucket",
            eventTime=now))
    data = json.dumps(polling_event, default=serialize)
    data = KinesisRecordsFactory(
        records=[KinesisRecordFactory(kinesis=KinesisDataFactory(data=data))])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)
    # Polling the same bucket must update, not duplicate, the current row.
    assert CurrentS3Model.count() == 1

    # Load the config and verify the polling timestamp is in there:
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert result[0].configuration["CreationDate"] == now.isoformat() + "Z"

    # And deletion:
    delete_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             source="aws.s3",
                             eventName="DeleteBucket",
                             eventTime=now))
    data = json.dumps(delete_event, default=serialize)
    data = KinesisRecordsFactory(
        records=[KinesisRecordFactory(kinesis=KinesisDataFactory(data=data))])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, None)
    assert CurrentS3Model.count() == 0
def test_collector(historical_role, buckets, mock_lambda_environment, swag_accounts, current_s3_table):
    """Test the Collector end-to-end: create, poll, and delete a bucket."""
    from historical.s3.models import CurrentS3Model
    from historical.s3.collector import handler

    def as_sqs_payload(event):
        # Serialize the CloudWatch event into the SQS-records envelope the
        # collector handler consumes, round-tripped through JSON the same
        # way Lambda would deliver it.
        body = json.dumps(event, default=serialize)
        records = RecordsFactory(records=[SQSDataFactory(body=body)])
        return json.loads(json.dumps(records, default=serialize))

    now = datetime.utcnow().replace(tzinfo=None, microsecond=0)

    # Creation:
    create_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             eventSource="aws.s3",
                             eventName="CreateBucket",
                             eventTime=now))
    handler(as_sqs_payload(create_event), mock_lambda_environment)

    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result) == 1
    assert result[0].Tags.attribute_values["theBucketName"] == "testbucket1"
    assert result[0].eventSource == "aws.s3"

    # Polling (make sure the date is included):
    polling_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={
            "bucketName": "testbucket1",
            "creationDate": now
        },
            eventSource="historical.s3.poller",
            eventName="PollS3",
            eventTime=now))
    handler(as_sqs_payload(polling_event), mock_lambda_environment)

    # Polling the same bucket must update the existing row, not add one.
    assert CurrentS3Model.count() == 1

    # Load the config and verify the polling timestamp is in there:
    result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
    assert result[0].configuration["CreationDate"] == now.isoformat() + "Z"
    assert result[0].eventSource == "historical.s3.poller"

    # And deletion:
    delete_event = CloudwatchEventFactory(
        detail=DetailFactory(requestParameters={"bucketName": "testbucket1"},
                             eventSource="aws.s3",
                             eventName="DeleteBucket",
                             eventTime=now))
    handler(as_sqs_payload(delete_event), mock_lambda_environment)
    assert CurrentS3Model.count() == 0
def test_collector_on_deleted_bucket(historical_role, buckets, mock_lambda_environment, swag_accounts, current_s3_table):
    """An update event for a nonexistent bucket must not create a row.

    If an event arrives on a bucket that is deleted, the collector should
    skip it and wait until the Deletion event arrives.
    """
    from historical.s3.collector import handler

    update_event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={
                "bucketName": "not-a-bucket"
            },
            source="aws.s3",
            eventName="PutBucketPolicy",
        )
    )
    serialized = json.dumps(update_event, default=serialize)
    payload = KinesisRecordsFactory(
        records=[
            KinesisRecordFactory(
                kinesis=KinesisDataFactory(data=serialized))
        ]
    )
    # Round-trip through JSON to mimic the Lambda delivery format.
    payload = json.loads(json.dumps(payload, default=serialize))

    handler(payload, None)
    assert CurrentS3Model.count() == 0
def test_historical_table_fixture(historical_table):
    """Sanity-check the `historical_table` fixture: it seeds exactly 10 items."""
    assert CurrentS3Model.count() == 10