def describe_group(record):
    """Attempts to describe group ids."""
    account_id = record['account']
    group_name = cloudwatch.filter_request_parameters('groupName', record)
    vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
    group_id = cloudwatch.filter_request_parameters('groupId', record)

    try:
        # Prefer the (group name, VPC) pair; fall back to the raw group id.
        if vpc_id and group_name:
            name_and_vpc_filters = [
                {'Name': 'group-name', 'Values': [group_name]},
                {'Name': 'vpc-id', 'Values': [vpc_id]},
            ]
            described = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=CURRENT_REGION,
                Filters=name_and_vpc_filters)
        elif group_id:
            described = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=CURRENT_REGION,
                GroupIds=[group_id])
        else:
            raise Exception('Describe requires a groupId or a groupName and VpcId.')

        return described['SecurityGroups']
    except ClientError as e:
        # A deleted group is an expected condition -- report "nothing found".
        if e.response['Error']['Code'] == 'InvalidGroup.NotFound':
            return []
        raise e
def handler(event, context):
    """Historical security group event poller.

    Runs on a fixed schedule so that changes cannot slip past historical
    undetected. For every configured account it emits `polling events` --
    simulated change events carrying the account/region the collector
    should gather data from.
    """
    log.debug('Running poller. Configuration: {}'.format(event))

    for account in get_historical_accounts():
        account_id = account['id']
        try:
            described = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=CURRENT_REGION)

            events = []
            for group in described['SecurityGroups']:
                events.append(security_group_polling_schema.serialize(account_id, group))

            produce_events(
                events,
                os.environ.get('HISTORICAL_STREAM', 'HistoricalSecurityGroupPollerStream'))

            log.debug(
                'Finished generating polling events. Account: {} Events Created: {}'.format(
                    account_id, len(events)))
        except ClientError as e:
            # Role-assumption or API failures should not stop the other accounts.
            log.warning(
                'Unable to generate events for account. AccountId: {account_id} Reason: {reason}'.format(
                    account_id=account_id, reason=e))
def describe_group(record, region):
    """Attempts to describe group ids."""
    account_id = record['account']
    group_name = cloudwatch.filter_request_parameters('groupName', record)
    vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
    group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)

    # Did this get collected already by the poller?
    if cloudwatch.get_collected_details(record):
        LOG.debug(
            f"[<--] Received already collected security group data: {record['detail']['collected']}")
        return [record['detail']['collected']]

    try:
        # Always depend on Group ID first:
        if group_id:  # pylint: disable=R1705
            found = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=region,
                GroupIds=[group_id])
        elif vpc_id and group_name:
            found = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=region,
                Filters=[
                    {'Name': 'group-name', 'Values': [group_name]},
                    {'Name': 'vpc-id', 'Values': [vpc_id]},
                ])
        else:
            raise Exception(
                '[X] Did not receive Group ID or VPC/Group Name pairs. '
                f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.')

        return found['SecurityGroups']
    except ClientError as exc:
        # A deleted group is an expected condition -- report "nothing found".
        if exc.response['Error']['Code'] == 'InvalidGroup.NotFound':
            return []
        raise exc
def describe_group(record):
    """Attempts to describe group ids."""
    account_id = record['account']
    group_name = cloudwatch.filter_request_parameters('groupName', record)
    vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
    group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)

    try:
        # Always depend on Group ID first:
        if group_id:
            found = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=CURRENT_REGION,
                GroupIds=[group_id])
        elif vpc_id and group_name:
            found = describe_security_groups(
                account_number=account_id,
                assume_role=HISTORICAL_ROLE,
                region=CURRENT_REGION,
                Filters=[
                    {'Name': 'group-name', 'Values': [group_name]},
                    {'Name': 'vpc-id', 'Values': [vpc_id]},
                ])
        else:
            raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. '
                            'We got: ID: {} VPC/Name: {}/{}.'.format(group_id, vpc_id, group_name))

        return found['SecurityGroups']
    except ClientError as e:
        # A deleted group is an expected condition -- report "nothing found".
        if e.response['Error']['Code'] == 'InvalidGroup.NotFound':
            return []
        raise e
def mock_describe_security_groups(**kwargs):
    from cloudaux.aws.ec2 import describe_security_groups

    # On the second pass, the token we handed out below must come back --
    # this verifies that the pagination logic is actually being reached:
    token = kwargs.get('NextToken')
    if token:
        assert token == 'MOARRESULTS'

    result = describe_security_groups(**kwargs)
    result['NextToken'] = 'MOARRESULTS'
    return result
def poller_processor_handler(event, context):
    """Historical Security Group Poller Processor.

    This will receive events from the Poller Tasker, and will list all objects of a given technology for an
    account/region pair. This will generate `polling events` which simulate changes. These polling events contain
    configuration data such as the account/region defining where the collector should attempt to gather data from.
    """
    log.debug('[@] Running Poller...')

    queue_url = get_queue_url(
        os.environ.get('POLLER_QUEUE_NAME', 'HistoricalSecurityGroupPoller'))

    records = deserialize_records(event['Records'])

    # Grand total of events produced across every account/region record:
    total_events = 0
    for record in records:
        # Skip accounts that have role assumption errors:
        try:
            groups = describe_security_groups(
                account_number=record['account_id'],
                assume_role=HISTORICAL_ROLE,
                region=record['region'])
            events = [
                security_group_polling_schema.serialize(record['account_id'], g)
                for g in groups['SecurityGroups']
            ]
            produce_events(events, queue_url)
            total_events += len(events)

            log.debug('[@] Finished generating polling events. Account: {}/{} '
                      'Events Created: {}'.format(record['account_id'],
                                                  record['region'], len(events)))
        except ClientError as e:
            log.error(
                '[X] Unable to generate events for account/region. Account Id/Region: {account_id}/{region}'
                ' Reason: {reason}'.format(account_id=record['account_id'],
                                           region=record['region'], reason=e))

    # BUG FIX: this previously logged `len(record['account_id'])` -- the character count
    # of the *last* account-id string -- and raised NameError when `records` was empty.
    # Log the real number of events created across the whole batch instead.
    log.debug('[@] Finished generating polling events. Events Created: {}'.format(total_events))
def poller_processor_handler(event, context):  # pylint: disable=W0613
    """Historical Security Group Poller Processor.

    This will receive events from the Poller Tasker, and will list all objects of a given technology for an
    account/region pair. This will generate `polling events` which simulate changes. These polling events contain
    configuration data such as the account/region defining where the collector should attempt to gather data from.
    """
    LOG.debug('[@] Running Poller...')

    collector_poller_queue_url = get_queue_url(
        os.environ.get('POLLER_QUEUE_NAME', 'HistoricalSecurityGroupPoller'))
    # FIX: renamed misspelled local `takser_queue_url` -> `tasker_queue_url`.
    tasker_queue_url = get_queue_url(
        os.environ.get('POLLER_TASKER_QUEUE_NAME', 'HistoricalSecurityGroupPollerTasker'))

    poller_task_schema = HistoricalPollerTaskEventModel()
    records = deserialize_records(event['Records'])

    for record in records:
        # Skip accounts that have role assumption errors:
        try:
            # Did we get a NextToken?
            if record.get('NextToken'):
                LOG.debug(
                    f"[@] Received pagination token: {record['NextToken']}")
                groups = describe_security_groups(
                    account_number=record['account_id'],
                    assume_role=HISTORICAL_ROLE,
                    region=record['region'],
                    MaxResults=200,
                    NextToken=record['NextToken'])
            else:
                groups = describe_security_groups(
                    account_number=record['account_id'],
                    assume_role=HISTORICAL_ROLE,
                    region=record['region'],
                    MaxResults=200)

            # FIRST THINGS FIRST: Did we get a `NextToken`? If so, we need to enqueue that ASAP because
            # `NextToken`s expire in 60 seconds!
            if groups.get('NextToken'):
                # BUG FIX: this called `logging.debug(...)` on the stdlib root logger instead of
                # the module-level LOG, bypassing this module's logger configuration/level.
                LOG.debug(
                    f"[-->] Pagination required {groups['NextToken']}. Tasking continuation."
                )
                produce_events([
                    poller_task_schema.serialize_me(
                        record['account_id'],
                        record['region'],
                        next_token=groups['NextToken'])
                ], tasker_queue_url)

            # Task the collector to perform all the DDB logic -- this will pass in the collected data to the
            # collector in very small batches.
            events = [
                SECURITY_GROUP_POLLING_SCHEMA.serialize(
                    record['account_id'], g, record['region'])
                for g in groups['SecurityGroups']
            ]
            produce_events(events, collector_poller_queue_url, batch_size=3)

            LOG.debug(
                f"[@] Finished generating polling events. Account: {record['account_id']}/{record['region']} "
                f"Events Created: {len(events)}")
        except ClientError as exc:
            LOG.error(
                f"[X] Unable to generate events for account/region. Account Id/Region: {record['account_id']}"
                f"/{record['region']} Reason: {exc}")
def test_collector(historical_role, mock_lambda_environment, historical_sqs, security_groups,
                   current_security_group_table):
    """Tests the Collector.

    Scenario walk-through (order matters -- each step builds on the table state
    left by the previous one):
      1. Poll event with pre-collected data: the describe API must NOT be called.
      2. CreateSecurityGroup event: collector describes and stores the group.
      3. DeleteSecurityGroup event: the item is removed from the current table.
      4. Create event with the group id only in `responseElements`.
      5. Off-region poll event: the stored ARN must use the group's own region.
    """
    # This should NOT be called at first:
    def mock_describe_security_groups(**kwargs):
        assert False

    patch_sgs = patch('historical.security_group.collector.describe_security_groups',
                      mock_describe_security_groups)
    patch_sgs.start()

    from historical.security_group.models import CurrentSecurityGroupModel
    from historical.security_group.collector import handler
    from cloudaux.aws.ec2 import describe_security_groups

    # Fetch real (moto-backed) details to embed as the poller's "collected" payload.
    sg_details = describe_security_groups(
        account_number='012345678910',
        assume_role='Historical',
        region='us-east-1',
        GroupIds=[security_groups['GroupId']])['SecurityGroups'][0]

    # Step 1: a polling event that already carries the collected details.
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='PollSecurityGroups',
            collected=sg_details))
    # Round-trip through JSON to mimic the SQS wire format the handler receives.
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    patch_sgs.stop()

    group = list(CurrentSecurityGroupModel.scan())
    assert len(group) == 1

    # Validate that Tags are correct:
    assert len(group[0].Tags.attribute_values) == 2
    assert group[0].Tags.attribute_values['Some'] == 'Value'
    assert group[0].Tags.attribute_values['Empty'] == '<empty>'
    group[0].delete()

    # Standard SG events:
    # Step 2: no `collected` payload here, so the (now unpatched) describe call runs.
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='CreateSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)

    group = list(CurrentSecurityGroupModel.scan())
    assert len(group) == 1

    # Validate that Tags are correct:
    assert len(group[0].Tags.attribute_values) == 2
    assert group[0].Tags.attribute_values['Some'] == 'Value'
    assert group[0].Tags.attribute_values['Empty'] == '<empty>'

    # Step 3: deletion removes the item from the current table.
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': security_groups['GroupId']},
            eventName='DeleteSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    assert CurrentSecurityGroupModel.count() == 0

    # Try to get it again -- this time, add the SG ID to the responseElements:
    event = CloudwatchEventFactory(
        detail=DetailFactory(
            responseElements={'groupId': security_groups['GroupId']},
            eventName='CreateSecurityGroup'
        ),
    )
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)
    assert CurrentSecurityGroupModel.count() == 1

    # Create a security group in an off-region. Make sure that the ARN of the Security Group is correct and NOT
    # set to the CURRENT_REGION:
    client = boto3.client('ec2', region_name='eu-west-2')
    sg_id = client.create_security_group(GroupName='London', Description='London',
                                         VpcId='vpc-test')['GroupId']
    sg_details = describe_security_groups(
        account_number='123456789012',
        assume_role='Historical',
        region='eu-west-2',
        GroupIds=[sg_id])['SecurityGroups'][0]

    event = CloudwatchEventFactory(
        detail=DetailFactory(
            requestParameters={'groupId': sg_id},
            eventName='PollSecurityGroups',
            awsRegion='eu-west-2',
            collected=sg_details))
    data = json.dumps(event, default=serialize)
    data = RecordsFactory(records=[SQSDataFactory(body=data)])
    data = json.dumps(data, default=serialize)
    data = json.loads(data)

    handler(data, mock_lambda_environment)

    # Query by the full ARN: region and account must match the off-region group.
    group = list(CurrentSecurityGroupModel.query(
        f'arn:aws:ec2:eu-west-2:123456789012:security-group/{sg_id}'))
    assert len(group) == 1