def create_delete_model(record):
    """Create a security group model from a record."""
    base_info = cloudwatch.get_historical_base_info(record)
    sg_id = cloudwatch.filter_request_parameters('groupId', record)
    arn = get_arn(sg_id, cloudwatch.get_region(record), record['account'])

    LOG.debug(f'[-] Deleting Dynamodb Records. Hash Key: {arn}')

    # Tombstone these records so that the deletion event time can be accurately tracked.
    base_info.update({'configuration': {}})

    existing = list(CurrentSecurityGroupModel.query(arn, limit=1))
    if not existing:
        return None

    # Overlay the deletion-event metadata on top of the latest stored attributes.
    tombstone = dict(existing[0].__dict__['attribute_values'])
    tombstone.update(base_info)

    model = CurrentSecurityGroupModel(**tombstone)
    model.save()
    return model
def capture_update_records(records):
    """Writes all updated configuration info to DynamoDB"""
    for record in records:
        data = cloudwatch.get_historical_base_info(record)

        matches = describe_group(record)
        if len(matches) > 1:
            raise Exception('[X] Multiple groups found. Record: {record}'.format(record=record))
        if not matches:
            log.warning('[?] No group information found. Record: {record}'.format(record=record))
            continue

        group = matches[0]

        # determine event data for group
        log.debug('Processing group. Group: {}'.format(group))
        data.update({
            'GroupId': group['GroupId'],
            'GroupName': group['GroupName'],
            'Description': group['Description'],
            'VpcId': group.get('VpcId'),
            'Tags': group.get('Tags', []),
            'arn': get_arn(group['GroupId'], group['OwnerId']),
            'OwnerId': group['OwnerId'],
            'configuration': group,
            'Region': cloudwatch.get_region(record),
        })

        log.debug('Writing Dynamodb Record. Records: {record}'.format(record=data))
        CurrentSecurityGroupModel(**data).save()
def capture_update_records(records):
    """Writes all updated configuration info to DynamoDB"""
    for record in records:
        data = cloudwatch.get_historical_base_info(record)

        results = describe_vpc(record)
        if len(results) > 1:
            raise Exception('Multiple vpcs found. Record: {record}'.format(record=record))
        if not results:
            log.warning('No vpc information found. Record: {record}'.format(record=record))
            continue

        vpc = results[0]

        # determine event data for vpc
        log.debug('Processing vpc. Vpc: {}'.format(vpc))
        data['VpcId'] = vpc.get('VpcId')
        data['Tags'] = vpc.get('Tags', [])
        data['arn'] = get_arn(vpc['VpcId'], data['accountId'])
        data['configuration'] = vpc
        data['State'] = vpc.get('State')
        data['IsDefault'] = vpc.get('IsDefault')
        data['CidrBlock'] = vpc.get('CidrBlock')
        data['Name'] = get_vpc_name(vpc)
        data['Region'] = cloudwatch.get_region(record)

        log.debug('Writing Dynamodb Record. Records: {record}'.format(record=data))
        CurrentVPCModel(**data).save()
def capture_update_records(records):
    """Writes all updated configuration info to DynamoDB"""
    for record in records:
        data = cloudwatch.get_historical_base_info(record)

        found = describe_vpc(record)
        if len(found) > 1:
            raise Exception(f'[X] Multiple vpcs found. Record: {record}')
        if not found:
            LOG.warning(f'[?] No vpc information found. Record: {record}')
            continue

        vpc = found[0]

        # determine event data for vpc
        LOG.debug(f'Processing vpc. VPC: {vpc}')
        region = cloudwatch.get_region(record)
        data['VpcId'] = vpc.get('VpcId')
        data['arn'] = get_arn(vpc['VpcId'], region, data['accountId'])
        data['configuration'] = vpc
        data['State'] = vpc.get('State')
        data['IsDefault'] = vpc.get('IsDefault')
        data['CidrBlock'] = vpc.get('CidrBlock')
        data['Name'] = get_vpc_name(vpc)
        data['Region'] = region
        data['version'] = VERSION
        data['Tags'] = pull_tag_dict(vpc)

        LOG.debug(f'[+] Writing DynamoDB Record. Records: {data}')
        CurrentVPCModel(**data).save()
def capture_update_records(records):
    """Writes all updated configuration info to DynamoDB"""
    for rec in records:
        data = cloudwatch.get_historical_base_info(rec)
        region = cloudwatch.get_region(rec)

        found = describe_group(rec, region)
        if len(found) > 1:
            raise Exception(f'[X] Multiple groups found. Record: {rec}')
        if not found:
            LOG.warning(f'[?] No group information found. Record: {rec}')
            continue

        group = found[0]

        # Determine event data for group - and pop off items that are going to the top-level:
        LOG.debug(f'Processing group. Group: {group}')
        data['GroupId'] = group['GroupId']
        data['GroupName'] = group.pop('GroupName')
        data['VpcId'] = group.pop('VpcId', None)
        # NOTE: GroupId was copied above before being popped off here:
        data['arn'] = get_arn(group.pop('GroupId'), region, group.pop('OwnerId'))
        data['Region'] = region
        data['Tags'] = pull_tag_dict(group)

        # Set the remaining items to the configuration:
        data['configuration'] = group

        # Set the version:
        data['version'] = VERSION

        LOG.debug(f'[+] Writing Dynamodb Record. Records: {data}')
        CurrentSecurityGroupModel(**data).save()
def test_poller_processor_handler(historical_sqs, historical_role, mock_lambda_environment, security_groups, swag_accounts):
    """Test the Poller's processing component that tasks the collector."""
    # Mock this so it returns a `NextToken`:
    def mock_describe_security_groups(**kwargs):
        from cloudaux.aws.ec2 import describe_security_groups

        # Did we receive a NextToken? (this will happen on the second run through to verify that
        # this logic is being reached:
        if kwargs.get('NextToken'):
            assert kwargs['NextToken'] == 'MOARRESULTS'

        # Delegate to the real call, then force pagination by appending a token:
        result = describe_security_groups(**kwargs)
        result['NextToken'] = 'MOARRESULTS'
        return result

    patch_sgs = patch('historical.security_group.poller.describe_security_groups', mock_describe_security_groups)
    patch_sgs.start()

    # Import after patching so the handler binds to the mocked function:
    from historical.security_group.poller import poller_processor_handler as handler
    from historical.common import cloudwatch

    # Create the events and SQS records:
    messages = make_poller_events()
    event = json.loads(json.dumps(RecordsFactory(records=messages), default=serialize))

    # Run the poller handler:
    handler(event, mock_lambda_environment)

    # Need to ensure that 3 total SGs were added into SQS:
    sqs = boto3.client("sqs", region_name="us-east-1")
    queue_url = get_queue_url(os.environ['POLLER_QUEUE_NAME'])
    messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)['Messages']
    assert len(messages) == 3

    # Verify that the region is properly propagated through, and that we got the collected data:
    for msg in messages:
        body = json.loads(msg['Body'])
        assert cloudwatch.get_region(body) == 'us-east-1'
        assert body['detail']['collected']['OwnerId'] == '123456789012'
        # ResponseMetadata should have been stripped from the collected payload:
        assert not body['detail']['collected'].get('ResponseMetadata')

    # Now, verify that the pagination was sent in properly to SQS tasker queue:
    queue_url = get_queue_url(os.environ['POLLER_TASKER_QUEUE_NAME'])
    messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)['Messages']
    assert len(messages) == 1
    assert json.loads(messages[0]['Body'])['NextToken'] == 'MOARRESULTS'

    # Re-run the poller:
    messages[0]['body'] = messages[0]['Body']  # Need to change the casing
    handler({'Records': messages}, mock_lambda_environment)

    patch_sgs.stop()
def create_delete_model(record):
    """Create an S3 model from a record."""
    arn = "arn:aws:s3:::{}".format(cloudwatch.filter_request_parameters('bucketName', record))
    log.debug('[-] Deleting Dynamodb Records. Hash Key: {arn}'.format(arn=arn))

    # Empty Tags/configuration tombstone the bucket entry.
    attributes = dict(
        arn=arn,
        principalId=cloudwatch.get_principal(record),
        userIdentity=cloudwatch.get_user_identity(record),
        accountId=record['account'],
        eventTime=record['detail']['eventTime'],
        BucketName=cloudwatch.filter_request_parameters('bucketName', record),
        Region=cloudwatch.get_region(record),
        Tags={},
        configuration={},
        eventSource=record["detail"]["eventSource"],
    )
    return CurrentS3Model(**attributes)
def test_get_region():
    """Tests that the Region can be pulled out of the CloudWatch Event."""
    from historical.common.cloudwatch import get_region

    factory_event = CloudwatchEventFactory()
    parsed = json.loads(json.dumps(factory_event, default=serialize))

    assert get_region(parsed) == 'us-east-1'
def test_get_region():
    """Tests that the Region can be pulled out of the CloudWatch Event."""
    from historical.common.cloudwatch import get_region

    raw_event = CloudwatchEventFactory()
    round_tripped = json.loads(json.dumps(raw_event, default=serialize))

    assert get_region(round_tripped) == 'us-east-1'