def parse_file_system(self, global_params, region, file_system):
    """
    Parse a single EFS file system and fetch its tags and mount targets.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param file_system:   Raw file system object returned by the EFS API
    """
    fs_id = file_system.pop('FileSystemId')
    # Normalize the display name; file systems may be unnamed
    file_system['name'] = file_system.pop('Name') if 'Name' in file_system else None
    api_client = api_clients[region]
    # Get tags
    file_system['tags'] = handle_truncated_response(
        api_client.describe_tags, {'FileSystemId': fs_id}, ['Tags'])['Tags']
    # Get mount targets, indexed by their ID
    mount_targets = handle_truncated_response(
        api_client.describe_mount_targets, {'FileSystemId': fs_id},
        ['MountTargets'])['MountTargets']
    file_system['mount_targets'] = {}
    for mount_target in mount_targets:
        mt_id = mount_target['MountTargetId']
        # Security groups are only available via a per-mount-target API call
        mount_target['security_groups'] = \
            api_client.describe_mount_target_security_groups(
                MountTargetId=mt_id)['SecurityGroups']
        file_system['mount_targets'][mt_id] = mount_target
    self.file_systems[fs_id] = file_system
def parse_users(self, user, params):
    """
    Parse a single IAM user and fetch additional data (inline policies,
    group memberships, login profile, access keys and MFA devices).
    """
    # When resuming upon throttling error, skip if already fetched
    # NOTE(review): self.users is keyed by user ID below, so this
    # membership test by user name may never match -- confirm intent.
    if user['UserName'] in self.users:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    user['id'] = user.pop('UserId')
    user['name'] = user.pop('UserName')
    user['arn'] = user.pop('Arn')
    inline_policies = self.__get_inline_policies(api_client, 'user',
                                                 user['id'], user['name'])
    if len(inline_policies):
        user['inline_policies'] = inline_policies
        user['inline_policies_count'] = len(inline_policies)
    # Group memberships (names only)
    groups = handle_truncated_response(api_client.list_groups_for_user,
                                       {'UserName': user['name']},
                                       ['Groups'])['Groups']
    user['groups'] = [group['GroupName'] for group in groups]
    # Best effort: any failure to fetch the login profile is ignored
    try:
        user['LoginProfile'] = api_client.get_login_profile(
            UserName=user['name'])['LoginProfile']
    except Exception:
        pass
    user['AccessKeys'] = api_client.list_access_keys(
        UserName=user['name'])['AccessKeyMetadata']
    user['MFADevices'] = api_client.list_mfa_devices(
        UserName=user['name'])['MFADevices']
    # TODO: Users signing certs
    self.users[user['id']] = user
def parse_roles(self, fetched_role, params):
    """
    Parse a single IAM role and fetch additional data (inline policies,
    instance profiles and the trust relationship).
    """
    # When resuming upon throttling error, skip if already fetched
    if fetched_role['RoleName'] in self.roles:
        return
    api_client = params['api_client']
    role = {'instances_count': 'N/A'}
    # Ensure consistent attribute names across resource types
    role['id'] = fetched_role.pop('RoleId')
    role['name'] = fetched_role.pop('RoleName')
    role['arn'] = fetched_role.pop('Arn')
    # Copy the remaining attributes of interest
    get_keys(fetched_role, role, ['CreateDate', 'Path'])
    # Get inline role policies
    inline_policies = self.__get_inline_policies(api_client, 'role',
                                                 role['id'], role['name'])
    if len(inline_policies):
        role['inline_policies'] = inline_policies
        role['inline_policies_count'] = len(inline_policies)
    # Get instance profiles associated with the role
    profiles = handle_truncated_response(
        api_client.list_instance_profiles_for_role,
        {'RoleName': role['name']}, ['InstanceProfiles'])['InstanceProfiles']
    manage_dictionary(role, 'instance_profiles', {})
    for profile in profiles:
        profile_id = profile['InstanceProfileId']
        manage_dictionary(role['instance_profiles'], profile_id, {})
        role['instance_profiles'][profile_id]['arn'] = profile['Arn']
        role['instance_profiles'][profile_id]['name'] = profile['InstanceProfileName']
    # Get trust relationship
    role['assume_role_policy'] = {
        'PolicyDocument': fetched_role.pop('AssumeRolePolicyDocument')}
    # Save role
    self.roles[role['id']] = role
def parse_policies(self, fetched_policy, params):
    """
    Parse a single IAM managed policy and fetch additional information
    (default policy document version and attached IAM entities).
    """
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    policy = {
        'name': fetched_policy.pop('PolicyName'),
        'id': fetched_policy.pop('PolicyId'),
        'arn': fetched_policy.pop('Arn')
    }
    # Download the default version of the policy document
    policy_version = api_client.get_policy_version(
        PolicyArn=policy['arn'],
        VersionId=fetched_policy['DefaultVersionId'])['PolicyVersion']
    policy['PolicyDocument'] = policy_version['Document']
    # Get IAM entities (groups, roles, users) the policy is attached to
    policy['attached_to'] = {}
    attached_entities = handle_truncated_response(
        api_client.list_entities_for_policy, {'PolicyArn': policy['arn']},
        ['PolicyGroups', 'PolicyRoles', 'PolicyUsers'])
    for entity_type in attached_entities:
        # e.g. 'PolicyGroups' -> resource type 'groups', name field 'GroupName'
        resource_type = entity_type.replace('Policy', '').lower()
        name_field = entity_type.replace('Policy', '')[:-1] + 'Name'
        if len(attached_entities[entity_type]):
            policy['attached_to'][resource_type] = [
                {'name': entity[name_field]}
                for entity in attached_entities[entity_type]
            ]
    # Save policy
    self.policies[policy['id']] = policy
def parse_roles(self, fetched_role, params):
    """ Parse a single IAM role and fetch additional data """
    role = {}
    # Default placeholder -- presumably updated elsewhere when EC2 usage
    # is known; TODO confirm
    role['instances_count'] = 'N/A'
    # When resuming upon throttling error, skip if already fetched
    # NOTE(review): self.roles is keyed by role ID at the bottom of this
    # method, so this membership test by role name may never match -- confirm.
    if fetched_role['RoleName'] in self.roles:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    role['id'] = fetched_role.pop('RoleId')
    role['name'] = fetched_role.pop('RoleName')
    role['arn'] = fetched_role.pop('Arn')
    # Get other attributes
    get_keys(fetched_role, role, ['CreateDate', 'Path'])
    # Get role policies; only recorded when at least one exists
    policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
    if len(policies):
        role['inline_policies'] = policies
        role['inline_policies_count'] = len(policies)
    # Get instance profiles, indexed by profile ID
    profiles = handle_truncated_response(api_client.list_instance_profiles_for_role,
                                         {'RoleName': role['name']},
                                         ['InstanceProfiles'])
    manage_dictionary(role, 'instance_profiles', {})
    for profile in profiles['InstanceProfiles']:
        manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
        role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
        role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
    # Get trust relationship
    role['assume_role_policy'] = {}
    role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
    # Save role
    self.roles[role['id']] = role
def parse_users(self, user, params):
    """ Parse a single IAM user and fetch additional data """
    # When resuming upon throttling error, skip if already fetched
    # NOTE(review): self.users is keyed by user ID below, so this
    # membership test by user name may never match -- confirm intent.
    if user['UserName'] in self.users:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    user['id'] = user.pop('UserId')
    user['name'] = user.pop('UserName')
    user['arn'] = user.pop('Arn')
    # Inline policies are only recorded when at least one exists
    policies = self.__get_inline_policies(api_client, 'user', user['id'], user['name'])
    if len(policies):
        user['inline_policies'] = policies
        user['inline_policies_count'] = len(policies)
    # Group memberships (names only)
    user['groups'] = []
    groups = handle_truncated_response(api_client.list_groups_for_user,
                                       {'UserName': user['name']},
                                       ['Groups'])['Groups']
    for group in groups:
        user['groups'].append(group['GroupName'])
    # Best effort: any failure to fetch the login profile is ignored
    try:
        user['LoginProfile'] = api_client.get_login_profile(UserName=user['name'])['LoginProfile']
    except Exception as e:
        pass
    user['AccessKeys'] = api_client.list_access_keys(UserName=user['name'])['AccessKeyMetadata']
    user['MFADevices'] = api_client.list_mfa_devices(UserName=user['name'])['MFADevices']
    # TODO: Users signing certss
    self.users[user['id']] = user
def _fetch_targets(self, api_client, q, target):
    """
    Make an API call defined in metadata.json and queue the returned
    resources for parsing.

    Each resource is handed to the "parse_[object name]" method when one is
    implemented, otherwise to the generic store_target method.

    :param api_client: Low-level API client for a single region
    :param q:          Queue feeding the parsing worker threads
    :param target:     Tuple (target_type, response_attribute,
                       list_method_name, list_params, ignore_list_error)
    """
    # Handle & format the target type
    target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
    list_method = getattr(api_client, list_method_name)
    try:
        targets = handle_truncated_response(list_method, list_params,
                                            [response_attribute])[response_attribute]
    except Exception as e:
        if not ignore_list_error:
            printException(e)
        targets = []
    setattr(self, '%s_count' % target_type, len(targets))
    self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
    region = api_client._client_config.region_name
    # Queue resources; a distinct loop variable avoids shadowing the
    # 'target' parameter (the original code reused the name)
    for resource in targets:
        # Use the type-specific parser when one exists; the narrowed
        # except no longer swallows KeyboardInterrupt/SystemExit
        try:
            callback = getattr(self, 'parse_%s' % target_type[0:-1])
        except AttributeError:
            callback = self.store_target
        resource['scout2_target_type'] = target_type
        if q:
            # Add to the queue
            q.put((callback, region, resource))
def _fetch_targets(self, api_client, q, target):
    """
    List the resources described by 'target' and queue each one with the
    matching parse_<type> callback (or store_target when none exists).
    """
    # Handle & format the target type
    target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
    list_method = getattr(api_client, list_method_name)
    try:
        targets = handle_truncated_response(
            list_method, list_params, [response_attribute])[response_attribute]
    except Exception as e:
        if not ignore_list_error:
            printException(e)
        targets = []
    setattr(self, '%s_count' % target_type, len(targets))
    self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
    region = api_client._client_config.region_name
    # Queue resources; a distinct loop variable avoids shadowing the
    # 'target' parameter (the original code reused the name)
    for resource in targets:
        # Narrowed from a bare except: only a missing parse_<type> method
        # should fall back to the generic handler
        try:
            callback = getattr(self, 'parse_%s' % target_type[0:-1])
        except AttributeError:
            callback = self.store_target
        resource['scout2_target_type'] = target_type
        if q:
            # Add to the queue
            q.put((callback, region, resource))
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, ACLs, ...) for
    every object in the bucket and record it under bucket['keys'].

    :param api_client:       S3 API client
    :param bucket_name:      Name of the bucket to inspect
    :param bucket:           Bucket object to populate ('keys', 'keys_count')
    :param check_encryption: Whether to fetch per-object encryption settings
    :param check_acls:       Whether to fetch per-object ACL grantees
    """
    bucket['keys'] = []
    keys = handle_truncated_response(api_client.list_objects,
                                     {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(keys['Contents'])
    key_count = 0
    update_status(key_count, bucket['keys_count'], 'keys')
    for key in keys['Contents']:
        key_count += 1
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            # The encryption configuration is only accessible via an HTTP
            # header, only returned when requesting one object at a time...
            try:
                k = api_client.get_object(Bucket=bucket_name, Key=key['name'])
                key['ServerSideEncryption'] = k.get('ServerSideEncryption')
                key['SSEKMSKeyId'] = k.get('SSEKMSKeyId')
            except Exception as e:
                printException(e)
                continue
        if check_acls:
            # Best effort: skip keys whose ACLs cannot be fetched
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket,
                                              key_name=key['name'])
            except Exception:
                continue
        # Save it
        bucket['keys'].append(key)
        update_status(key_count, bucket['keys_count'], 'keys')
def parse_lb(self, global_params, region, lb):
    """
    Parse a single ELBv2 load balancer and attach it to its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param lb:            Raw load balancer object returned by the API
    """
    lb['arn'] = lb.pop('LoadBalancerArn')
    lb['name'] = lb.pop('LoadBalancerName')
    # Always pop 'VpcId' (the original left a falsy value behind) and fall
    # back to EC2-Classic when it is absent or empty
    vpc_id = lb.pop('VpcId', None) or ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    # Normalize security groups; default to [] since network load balancers
    # carry no 'SecurityGroups' attribute (original raised KeyError)
    lb['security_groups'] = [{'GroupId': sg}
                             for sg in lb.pop('SecurityGroups', [])]
    # Get listeners, indexed by port
    lb['listeners'] = {}
    listeners = handle_truncated_response(
        api_clients[region].describe_listeners,
        {'LoadBalancerArn': lb['arn']}, ['Listeners'])['Listeners']
    for listener in listeners:
        listener.pop('ListenerArn')
        listener.pop('LoadBalancerArn')
        port = listener.pop('Port')
        lb['listeners'][port] = listener
    # Get attributes
    lb['attributes'] = api_clients[region].describe_load_balancer_attributes(
        LoadBalancerArn=lb['arn'])['Attributes']
    self.vpcs[vpc_id].lbs[self.get_non_aws_id(lb['name'])] = lb
def __fetch_service(self, q, params):
    """
    Worker thread body: pull resource-type descriptions off q forever,
    list the matching resources via the provider API, and push each
    discovered resource onto the processing queue (params['q']).
    """
    api_client = params['api_client']
    try:
        while True:
            try:
                target_type, response_attribute, list_method_name, list_params, ignore_list_error = q.get()
                # Empty method name marks a no-op work item
                if not list_method_name:
                    continue
                try:
                    method = getattr(api_client, list_method_name)
                except Exception as e:
                    printException(e)
                    continue
                try:
                    targets = handle_truncated_response(method, list_params,
                                                        [response_attribute])[response_attribute]
                except Exception as e:
                    # Listing errors may be expected (ignore_list_error);
                    # treat the type as having no resources either way
                    if not ignore_list_error:
                        printException(e)
                    targets = []
                self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
                for target in targets:
                    params['q'].put((target_type, target),)
            except Exception as e:
                printException(e)
            finally:
                # Must run on every path (including 'continue') so that
                # q.join() in the producer does not hang
                q.task_done()
    except Exception as e:
        printException(e)
        pass
def parse_policies(self, fetched_policy, params):
    """
    Parse a single IAM policy and fetch additional information
    (default policy document version and attached IAM entities).
    """
    api_client = params['api_client']
    policy = {}
    # Ensure consistent attribute names across resource types
    policy['name'] = fetched_policy.pop('PolicyName')
    policy['id'] = fetched_policy.pop('PolicyId')
    policy['arn'] = fetched_policy.pop('Arn')
    # Download version and document
    policy_version = api_client.get_policy_version(PolicyArn=policy['arn'],
                                                   VersionId=fetched_policy['DefaultVersionId'])
    policy_version = policy_version['PolicyVersion']
    policy['PolicyDocument'] = policy_version['Document']
    # Get attached IAM entities
    policy['attached_to'] = {}
    attached_entities = handle_truncated_response(api_client.list_entities_for_policy,
                                                  {'PolicyArn': policy['arn']},
                                                  ['PolicyGroups', 'PolicyRoles', 'PolicyUsers'])
    for entity_type in attached_entities:
        # e.g. 'PolicyGroups' -> 'groups'
        resource_type = entity_type.replace('Policy', '').lower()
        if len(attached_entities[entity_type]):
            policy['attached_to'][resource_type] = []
            for entity in attached_entities[entity_type]:
                # e.g. 'PolicyGroups' -> 'GroupName'
                name_field = entity_type.replace('Policy', '')[:-1] + 'Name'
                resource_name = entity[name_field]
                policy['attached_to'][resource_type].append({'name': resource_name})
    # Save policy
    self.policies[policy['id']] = policy
def get_organization_accounts(api_client, quiet=True):
    """
    Return all AWS accounts in the organization.

    :param api_client: AWS Organizations API client
    :param quiet:      When False, print the accounts found
    :return:           List of account objects
    """
    # List all accounts in the organization
    accounts = handle_truncated_response(api_client.list_accounts, {},
                                         ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(accounts))
        printDebug(str(accounts))
    return accounts
def get_access_keys(iam_client, user_name):
    """
    Return the access key metadata for a single IAM user.

    :param iam_client: IAM API client
    :param user_name:  Name of the IAM user
    :return:           List of access key metadata objects
    """
    return handle_truncated_response(iam_client.list_access_keys,
                                     {'UserName': user_name},
                                     ['AccessKeyMetadata'])['AccessKeyMetadata']
def parse_file_system(self, global_params, region, file_system):
    """
    Parse a single EFS file system and fetch its tags and mount targets.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param file_system:   Raw file system object returned by the EFS API
    """
    fs_id = file_system.pop('FileSystemId')
    # 'Name' is optional in the EFS response; default to None for unnamed
    # file systems instead of raising KeyError (matches the sibling parser)
    file_system['name'] = file_system.pop('Name', None)
    # Get tags
    file_system['tags'] = handle_truncated_response(
        api_clients[region].describe_tags, {'FileSystemId': fs_id},
        ['Tags'])['Tags']
    # Get mount targets
    mount_targets = handle_truncated_response(
        api_clients[region].describe_mount_targets, {'FileSystemId': fs_id},
        ['MountTargets'])['MountTargets']
    file_system['mount_targets'] = {}
    for mt in mount_targets:
        mt_id = mt['MountTargetId']
        file_system['mount_targets'][mt_id] = mt
        # Security groups are only available via a per-mount-target API call
        file_system['mount_targets'][mt_id]['security_groups'] = \
            api_clients[region].describe_mount_target_security_groups(
                MountTargetId=mt_id)['SecurityGroups']
    self.file_systems[fs_id] = file_system
def get_children_organizational_units(api_client, parents):
    """
    Recursively collect the leaf organizational units under the given parents.

    :param api_client: AWS Organizations API client
    :param parents:    List of OU (or root) objects carrying an 'Id'
    :return:           List of OUs that have no child OUs of their own
    """
    leaf_ous = []
    for parent in parents:
        children = handle_truncated_response(
            api_client.list_organizational_units_for_parent,
            {'ParentId': parent['Id']},
            ['OrganizationalUnits'])['OrganizationalUnits']
        if len(children):
            # Recurse until an OU without children is reached
            leaf_ous += get_children_organizational_units(api_client, children)
        else:
            leaf_ous.append(parent)
    return leaf_ous
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single RDS parameter group and fetch all of its parameters.

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Raw DB parameter group object returned by the API
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    parameters = handle_truncated_response(
        api_client.describe_db_parameters,
        {'DBParameterGroupName': parameter_group['name']},
        ['Parameters'])['Parameters']
    for parameter in parameters:
        # 'ParameterValue' is absent for parameters using the engine default
        param = {'value': parameter.get('ParameterValue'),
                 'source': parameter['Source']}
        manage_dictionary(parameter_group, 'parameters', {})
        parameter_group['parameters'][parameter['ParameterName']] = param
    # Save
    self.parameter_groups[parameter_group['name']] = parameter_group
def parse_hosted_zones(self, hosted_zone, params):
    """
    Parse a single Route53 hosted zone and merge its record sets into it.
    """
    hosted_zone_id = hosted_zone.pop('Id')
    hosted_zone['name'] = hosted_zone.pop('Name')
    api_client = params['api_client']
    # Merge the record sets into the hosted zone object
    record_sets = handle_truncated_response(
        api_client.list_resource_record_sets,
        {'HostedZoneId': hosted_zone_id}, ['ResourceRecordSets'])
    hosted_zone.update(record_sets)
    self.hosted_zones[hosted_zone_id] = hosted_zone
def get_organization_accounts(api_client, exceptions=None, quiet=True):
    """
    Return the accounts in the organization, minus explicit exceptions.

    :param api_client: AWS Organizations API client
    :param exceptions: Optional list of account IDs to exclude
    :param quiet:      When False, print the accounts found
    :return:           List of account objects
    """
    # Avoid the mutable default argument of the original signature; None
    # means "no exceptions" and keeps the call sites backward-compatible
    exceptions = exceptions if exceptions is not None else []
    # List all accounts in the organization
    org_accounts = handle_truncated_response(api_client.list_accounts, {},
                                             ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(org_accounts))
        for account in org_accounts:
            printDebug(str(account))
    if len(exceptions):
        org_accounts = [account for account in org_accounts
                        if account['Id'] not in exceptions]
    return org_accounts
def _get_targets(self, response_attribute, api_client, method, list_params, ignore_list_error):
    """
    Fetch the targets, required as each provider may have particularities.

    :param response_attribute: Name of the attribute holding the resources
    :param api_client:         Provider API client (not used here; kept for
                               signature compatibility across providers)
    :param method:             Bound list/describe API method to call
    :param list_params:        One parameter dict, or a list of them
    :param ignore_list_error:  Not used here; errors are handled by callers
    :return:                   Flat list of fetched resources
    """
    # Accept a single parameter set or a list of them; use isinstance
    # instead of the original exact-type comparison
    if not isinstance(list_params, list):
        list_params = [list_params]
    targets = []
    for params in list_params:
        targets += handle_truncated_response(
            method, params, [response_attribute])[response_attribute]
    return targets
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, acls, etc...)

    :param api_client:       S3 API client
    :param bucket_name:      Name of the bucket to inspect
    :param bucket:           Bucket object to populate ('keys', 'keys_count')
    :param check_encryption: Whether to fetch per-object encryption settings
    :param check_acls:       Whether to fetch per-object ACL grantees
    """
    bucket['keys'] = []
    keys = handle_truncated_response(api_client.list_objects,
                                     {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(keys['Contents'])
    key_count = 0
    # FIXME - progress reporting disabled: update_status() isn't defined anywhere
    for key in keys['Contents']:
        key_count += 1
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            # The encryption configuration is only accessible via an HTTP
            # header, only returned when requesting one object at a time...
            try:
                k = api_client.get_object(Bucket=bucket_name, Key=key['name'])
                key['ServerSideEncryption'] = k.get('ServerSideEncryption')
                key['SSEKMSKeyId'] = k.get('SSEKMSKeyId')
            except Exception as e:
                printException(e)
                continue
        if check_acls:
            # Best effort: skip keys whose ACLs cannot be fetched
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket,
                                              key_name=key['name'])
            except Exception:
                continue
        # Save it
        bucket['keys'].append(key)
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single RDS parameter group and fetch its modifiable parameters.

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Raw DB parameter group object returned by the API
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    try:
        parameters = handle_truncated_response(
            api_client.describe_db_parameters,
            {'DBParameterGroupName': parameter_group['name']},
            ['Parameters'])['Parameters']
        manage_dictionary(parameter_group, 'parameters', {})
        for parameter in parameters:
            # Discard non-modifiable parameters
            if not parameter['IsModifiable']:
                continue
            name = parameter.pop('ParameterName')
            parameter_group['parameters'][name] = parameter
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % parameter_group['name'])
    # Save
    self.parameter_groups[self.get_non_aws_id(parameter_group['name'])] = parameter_group
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single Redshift parameter group and fetch all of its parameters

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Parameter group
    """
    pg_name = parameter_group.pop('ParameterGroupName')
    # Name could be used as only letters digits or hyphens
    pg_id = self.get_non_aws_id(pg_name)
    parameter_group['name'] = pg_name
    parameter_group['parameters'] = {}
    api_client = api_clients[region]
    parameters = handle_truncated_response(
        api_client.describe_cluster_parameters,
        {'ParameterGroupName': pg_name}, ['Parameters'])['Parameters']
    for parameter in parameters:
        parameter_group['parameters'][parameter['ParameterName']] = {
            'value': parameter['ParameterValue'],
            'source': parameter['Source']
        }
    self.parameter_groups[pg_id] = parameter_group
def delete_stack_set(api_client, stack_set_name, timeout=60 * 5):
    """
    Delete a CloudFormation stack set, removing its stack instances first.

    :param api_client:     CloudFormation API client
    :param stack_set_name: Name of the stack set to delete
    :param timeout:        Kept for backward compatibility; currently unused
                           (completion is awaited via wait_for_operation)
    """
    printDebug('Deleting stack set %s' % stack_set_name)
    # A stack set cannot be deleted while it still has stack instances
    stack_instances = handle_truncated_response(
        api_client.list_stack_instances, {'StackSetName': stack_set_name},
        ['Summaries'])['Summaries']
    if len(stack_instances) > 0:
        # Collect the distinct accounts and regions covered by the instances
        account_ids = []
        regions = []
        for si in stack_instances:
            if si['Account'] not in account_ids:
                account_ids.append(si['Account'])
            if si['Region'] not in regions:
                regions.append(si['Region'])
        operation_id = api_client.delete_stack_instances(
            StackSetName=stack_set_name,
            Accounts=account_ids,
            Regions=regions,
            RetainStacks=False)['OperationId']
        # Block until the instance deletion operation completes
        wait_for_operation(api_client, stack_set_name, operation_id)
    api_client.delete_stack_set(StackSetName=stack_set_name)
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single Redshift parameter group and fetch all of its parameters

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Parameter group
    """
    pg_name = parameter_group.pop('ParameterGroupName')
    # Name could be used as only letters digits or hyphens
    pg_id = self.get_non_provider_id(pg_name)
    parameter_group['name'] = pg_name
    parameter_group['parameters'] = {}
    api_client = api_clients[region]
    parameters = handle_truncated_response(
        api_client.describe_cluster_parameters,
        {'ParameterGroupName': pg_name}, ['Parameters'])['Parameters']
    for parameter in parameters:
        # Keep only the value and its source for each parameter
        param = {}
        param['value'] = parameter['ParameterValue']
        param['source'] = parameter['Source']
        parameter_group['parameters'][parameter['ParameterName']] = param
    (self).parameter_groups[pg_id] = parameter_group
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single RDS parameter group: keep only its modifiable parameters
    and store the group under a normalized ID.
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    try:
        parameters = handle_truncated_response(
            api_client.describe_db_parameters,
            {'DBParameterGroupName': parameter_group['name']},
            ['Parameters'])['Parameters']
        manage_dictionary(parameter_group, 'parameters', {})
        for parameter in parameters:
            if not parameter['IsModifiable']:
                # Discard non-modifiable parameters
                continue
            parameter_name = parameter.pop('ParameterName')
            parameter_group['parameters'][parameter_name] = parameter
    except Exception as e:
        # Best effort: record the group even when its parameters can't be read
        printException(e)
        printError('Failed fetching DB parameters for %s' %
                   parameter_group['name'])
    # Save
    parameter_group_id = self.get_non_provider_id(parameter_group['name'])
    (self).parameter_groups[parameter_group_id] = parameter_group
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, ACLs, ...) for
    every object in the bucket and record it under bucket['keys'].

    :param api_client:       S3 API client
    :param bucket_name:      Name of the bucket to inspect
    :param bucket:           Bucket object to populate ('keys', 'keys_count')
    :param check_encryption: Whether to fetch per-object encryption settings
    :param check_acls:       Whether to fetch per-object ACL grantees
    """
    bucket['keys'] = []
    keys = handle_truncated_response(api_client.list_objects,
                                     {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(keys['Contents'])
    key_count = 0
    update_status(key_count, bucket['keys_count'], 'keys')
    for key in keys['Contents']:
        key_count += 1
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            try:
                # The encryption configuration is only accessible via an HTTP header, only returned when requesting one object at a time...
                k = api_client.get_object(Bucket = bucket_name, Key = key['name'])
                key['ServerSideEncryption'] = k['ServerSideEncryption'] if 'ServerSideEncryption' in k else None
                key['SSEKMSKeyId'] = k['SSEKMSKeyId'] if 'SSEKMSKeyId' in k else None
            except Exception as e:
                printException(e)
                continue
        if check_acls:
            # Best effort: skip keys whose ACLs cannot be fetched
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket, key_name = key['name'])
            except Exception as e:
                continue
        # Save it
        bucket['keys'].append(key)
        update_status(key_count, bucket['keys_count'], 'keys')
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single RDS parameter group and fetch all of its parameters.

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Raw DB parameter group object returned by the API
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    try:
        parameters = handle_truncated_response(
            api_client.describe_db_parameters,
            {'DBParameterGroupName': parameter_group['name']},
            ['Parameters'])['Parameters']
        for parameter in parameters:
            # 'ParameterValue' is absent for parameters using the engine default
            param = {'value': parameter.get('ParameterValue'),
                     'source': parameter['Source']}
            manage_dictionary(parameter_group, 'parameters', {})
            parameter_group['parameters'][parameter['ParameterName']] = param
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % parameter_group['name'])
    # Save
    parameter_group_id = self.get_non_aws_id(parameter_group['name'])
    self.parameter_groups[parameter_group_id] = parameter_group
def main():
    """
    Create or update the master CloudFormation stacks and stack sets for
    every account in the organization that is ready for stack sets.
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions',
                        help='Regions where stack instances will be created.')
    parser.add_argument('partition-name')
    parser.parser.add_argument(
        '--master-region',
        dest='master_region',
        default=None,
        required=True,
        help='Region where the global stacks and stack sets will be created.')
    parser.parser.add_argument(
        '--stack-prefix',
        dest='stack_prefix',
        default=None,
        required=True,
        help='Prefix of the CF Templates to be used when creating/updating stacks.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Get profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Get the master AWS account ID
    master_account_id = get_aws_account_id(credentials)

    # Get list of accounts ready for Stack sets; narrowed from a bare
    # except that also caught KeyboardInterrupt/SystemExit
    api_client = connect_service('organizations', credentials, silent=True)
    try:
        org_account_ids = get_organization_account_ids(api_client, quiet=False)
    except Exception:
        # No organization (or no access to it): fall back to this account
        org_account_ids = [master_account_id]
    configured_org_account_ids = get_stackset_ready_accounts(
        credentials, org_account_ids, quiet=False)

    # Validate the stack set region
    regions = build_region_list('cloudformation', args.regions, args.partition_name)
    if args.master_region not in regions:
        printError('Error, the stack set region \'%s\' is not valid. Acceptable values are:' % args.master_region)
        printError(', '.join(regions))
        return 42

    # Connect
    printInfo('')
    api_client = connect_service('cloudformation', credentials,
                                 args.master_region, silent=True)

    # Establish the list of existing stacks and stack sets
    deployed_resources = {'stacks': {}, 'stack_sets': {}}
    printInfo('Fetching existing stacks and stack sets in %s in %s...'
              % (args.master_region, master_account_id))
    for stack in handle_truncated_response(
            api_client.list_stacks, {}, ['StackSummaries'])['StackSummaries']:
        if stack['StackStatus'] not in ['CREATE_FAILED', 'DELETE_COMPLETE']:
            deployed_resources['stacks'][stack['StackName']] = stack
    for stack_set in handle_truncated_response(api_client.list_stack_sets,
                                               {'Status': 'ACTIVE'},
                                               ['Summaries'])['Summaries']:
        stack_set = api_client.describe_stack_set(
            StackSetName=stack_set['StackSetName'])['StackSet']
        deployed_resources['stack_sets'][stack_set['StackSetName']] = stack_set
    printInfo(' - Found %d stacks.' % len(deployed_resources['stacks']))
    for stack_name in deployed_resources['stacks']:
        printInfo('   - %s' % stack_name)
    printInfo(' - Found %d stacks sets.' % len(deployed_resources['stack_sets']))
    for stack_set_name in deployed_resources['stack_sets']:
        printInfo('   - %s' % stack_set_name)

    # Create the list of stacks to deploy
    templates = get_cloudformation_templates(args.stack_prefix)

    # Master stacks: create missing ones, update outdated ones
    for stack_name in sorted(templates['master_stacks'].keys()):
        if stack_name not in deployed_resources['stacks']:
            create_stack(api_client, stack_name,
                         templates['master_stacks'][stack_name]['file_path'],
                         wait_for_completion=templates['master_stacks'][stack_name]['wait_for_completion'])
        elif resource_older_than_template(
                'stack', deployed_resources['stacks'][stack_name],
                templates['master_stacks'][stack_name]['file_path']):
            update_stack(api_client, stack_name,
                         templates['master_stacks'][stack_name]['file_path'],
                         wait_for_completion=templates['master_stacks'][stack_name]['wait_for_completion'])

    if len(configured_org_account_ids) == 0:
        printInfo('\nNo account IDs that support stack sets were found, skipping stack set configuration.')
        # Original code had a duplicated, unreachable second 'return' here
        return

    # Stack sets: create missing ones, update outdated ones
    for stack_set_name in sorted(templates['master_stack_sets'].keys()):
        if stack_set_name not in deployed_resources['stack_sets']:
            create_stack_set(
                api_client, stack_set_name,
                templates['master_stack_sets'][stack_set_name]['file_path'],
                wait_for_completion=True)
        elif resource_older_than_template(
                'stack_set', deployed_resources['stack_sets'][stack_set_name],
                templates['master_stack_sets'][stack_set_name]['file_path']):
            update_stack_set(
                api_client, stack_set_name,
                templates['master_stack_sets'][stack_set_name]['file_path'],
                wait_for_completion=True)
def main():
    """
    Ensure every IAM user belongs to the configured common groups and to
    exactly one category group (auto-assigned via regex, with a default).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('common-groups',
                        default=[],
                        nargs='+',
                        help='List of groups each IAM user should belong to.')
    parser.add_argument('category-groups',
                        default=[],
                        nargs='+',
                        help='List of category groups; each IAM user must belong to one.')
    parser.add_argument('category-regex',
                        default=[],
                        nargs='+',
                        help='List of regex enabling auto-assigment of category groups.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Read creds
    credentials = read_creds(args.profile[0])
    if not credentials['AccessKeyId']:
        return 42

    # Connect to IAM APIs
    iam_client = connect_service('iam', credentials)
    if not iam_client:
        return 42

    # Initialize and compile the list of regular expression for category groups
    category_regex = init_group_category_regex(args.category_groups, args.category_regex)

    # Ensure all default groups exist
    create_groups(iam_client, args.category_groups + args.common_groups)

    # Download IAM users and their group memberships (threaded)
    printInfo('Downloading group membership information...')
    user_info = {}
    users = handle_truncated_response(iam_client.list_users, {}, ['Users'])['Users']
    show_status(user_info, total=len(users), newline=False)
    thread_work(users, get_group_membership, {
        'iam_client': iam_client,
        'user_info': user_info
    }, num_threads=30)
    show_status(user_info)

    # Iterate through users
    for user in user_info:
        printInfo('Checking configuration of \'%s\'...' % user)
        # Add the user to any missing common group
        for group in args.common_groups:
            if group not in user_info[user]['groups']:
                printInfo(' - Adding to common group: %s' % group)
                iam_client.add_user_to_group(UserName=user, GroupName=group)
        # Match the user against each category regex; a None regex marks
        # the default category group
        category_found = False
        for i, regex in enumerate(category_regex):
            if regex and regex.match(user):
                category_found = True
                group = args.category_groups[i]
                if group not in user_info[user]['groups']:
                    printInfo(' - Adding to category group: %s' % group)
                    iam_client.add_user_to_group(UserName=user, GroupName=group)
            elif not regex:
                default_group = args.category_groups[i]
        # Fall back to the default category group when no regex matched
        if not category_found and default_group not in user_info[user]['groups']:
            printInfo(' - Adding to default category group: %s' % default_group)
            iam_client.add_user_to_group(UserName=user, GroupName=default_group)
def list_accounts_for_parent(api_client, parent):
    """
    Return all AWS accounts directly under the given parent (root or OU).
    """
    response = handle_truncated_response(api_client.list_accounts_for_parent,
                                         {'ParentId': parent['Id']},
                                         ['Accounts'])
    return response['Accounts']
def main():
    """
    Configure cross-account CloudWatch Events forwarding.

    Grants every stack-set-enabled account in the AWS Organization
    permission to put events on the monitoring account's default event
    buses, then creates a CloudFormation stack set (and stack instances
    in every selected region/account) that forwards the events.

    :return: 42 on error, None on success
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions', help = 'Regions where stack instances will be created.')
    parser.add_argument('partition-name')
    # NOTE: custom flags go through the wrapped argparse parser (parser.parser)
    parser.parser.add_argument('--stack-set-region',
                               dest='stack_set_region',
                               default=None,
                               required=True,
                               help='Region where the stack set will be created.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Get profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Validate the stack set region
    regions = build_region_list('events', args.regions, args.partition_name)
    if args.stack_set_region not in regions:
        printError('Error, the stack set region \'%s\' is not valid. Acceptable values are:' % args.stack_set_region)
        printError(', '.join(regions))
        return 42

    # Determine the master account id to exclude it from the list of accounts to be configured for event forwarding
    monitoring_account_id = get_aws_account_id(credentials)

    # Connect to the AWS Organizations API
    api_client = connect_service('organizations', credentials)

    # List all accounts in the organization
    org_account_ids = []
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    # Keep only active accounts, excluding the monitoring (master) account itself
    org_account_ids = [
        account['Id'] for account in org_accounts
        if account['Status'] == 'ACTIVE' and account['Id'] != monitoring_account_id
    ]
    printInfo('Found %d accounts in the organization.' % len(org_account_ids))
    printDebug(str(org_account_ids))

    # Verify that the account has been configured for stack sets by attempting to assume the stack set execution role
    api_client = connect_service('sts', credentials, silent = True)
    configured_org_account_ids = []
    for account_id in org_account_ids:
        try:
            role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
            api_client.assume_role(RoleArn = role_arn, RoleSessionName = 'foobar')
            configured_org_account_ids.append(account_id)
        except Exception as e:
            # Deliberate best-effort: an AccessDenied here just means the
            # account lacks the execution role and is skipped below.
            pass
    if len(configured_org_account_ids) != len(org_account_ids):
        printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_org_account_ids))
        printInfo(str(configured_org_account_ids))

    # For each region with cloudwatch events, put a permission for each account
    printInfo('Adding permissions on the default event buses...')
    for region in regions:
        api_client = connect_service('events', credentials, region)
        for account in org_accounts:
            account_id = account['Id']
            if account_id not in configured_org_account_ids:
                continue
            account_name = account['Name']
            api_client.put_permission(Action = 'events:PutEvents', Principal = account_id, StatementId = 'AWSRecipesAllow%s' % account_id)

    # Create the stack set
    try:
        stack_set_name = 'CloudwatchEventsForwarding'
        api_client = connect_service('cloudformation', credentials, args.stack_set_region)
        # TBD: need for the region where the stack set is created and the regions where the stack instances are created...
        template_path = os.path.join((os.path.dirname(os.path.realpath(__file__))), '../CloudFormationTemplates/CloudwatchEventsForwarding.region.yml')
        with open(template_path, 'rt') as f:
            template_body = f.read()
        template_parameters = [
            {'ParameterKey': 'EventsMonitoringAccountID',
             'ParameterValue': get_aws_account_id(credentials)
             }
        ]
        printInfo('Creating the stack set...')
        response = api_client.create_stack_set(StackSetName = stack_set_name, TemplateBody = template_body, Parameters = template_parameters)
    except Exception as e:
        # An already-existing stack set is fine (idempotent re-run);
        # anything else is fatal. NOTE(review): assumes e is a botocore
        # ClientError with a .response attribute — confirm.
        if e.response['Error']['Code'] != 'NameAlreadyExistsException':
            printException(e)
            printError('Failed to create the stack set.')
            return 42

    # Create the stack instances: one per region in every account
    operation_preferences = {
        'FailureTolerancePercentage': 100,
        'MaxConcurrentPercentage': 100
    }
    response = api_client.create_stack_instances(StackSetName = stack_set_name, Accounts = configured_org_account_ids, Regions = regions, OperationPreferences = operation_preferences)
    printInfo('Successfully started operation Id %s' % response['OperationId'])
def main():
    """
    Aggregate the permissions granted to IAM resources.

    For every requested IAM group, role, or user (by name, by ARN, or via
    the --all* flags), fetch all applicable inline and managed policies,
    merge them, and write the resulting permissions to disk. Explicitly
    requested managed policies (--policy-arn) are dumped as well.

    :return: 42 on error, None on success
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('user-name', help='Name of the IAM user(s).')
    parser.parser.add_argument('--all-users',
                               dest='all_users',
                               default=False,
                               action='store_true',
                               help='Go through all IAM users')
    parser.parser.add_argument('--arn',
                               dest='arn',
                               default=[],
                               nargs='+',
                               help='ARN of the target group(s), role(s), or user(s)')
    parser.parser.add_argument('--group-name',
                               dest='group_name',
                               default=[],
                               nargs='+',
                               help='Name of the IAM group(s)')
    parser.parser.add_argument('--all-groups',
                               dest='all_groups',
                               default=False,
                               action='store_true',
                               help='Go through all IAM groups')
    parser.parser.add_argument('--role-name',
                               dest='role_name',
                               default=[],
                               nargs='+',
                               help='Name of the IAM role(s)')
    parser.parser.add_argument('--all-roles',
                               dest='all_roles',
                               default=False,
                               action='store_true',
                               help='Go through all IAM roles')
    parser.parser.add_argument('--policy-arn',
                               dest='policy_arn',
                               default=[],
                               nargs='+',
                               help='ARN of the IAM policy/ies')
    parser.parser.add_argument('--all',
                               dest='all',
                               default=False,
                               action='store_true',
                               help='Go through all IAM resources')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    profile_name = args.profile[0]

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Connect to IAM
    iam_client = connect_service('iam', credentials)
    if not iam_client:
        return 42

    # Normalize all requested resources into (resource_type, resource_name) tuples
    targets = []
    for arn in args.arn:
        arn_match = re_arn.match(arn)
        if arn_match:
            # The 5th ARN group is '<type>/.../<name>'
            resource = arn_match.groups()[4].split('/')
            targets.append((resource[0], resource[-1]))
    targets += [('group', name) for name in args.group_name if name]
    targets += [('role', name) for name in args.role_name if name]
    targets += [('user', name) for name in args.user_name if name]
    if args.all or args.all_groups:
        printInfo('Fetching all IAM groups...')
        groups = handle_truncated_response(iam_client.list_groups, {}, ['Groups'])['Groups']
        targets += [('group', group['GroupName']) for group in groups]
    if args.all or args.all_roles:
        printInfo('Fetching all IAM roles...')
        roles = handle_truncated_response(iam_client.list_roles, {}, ['Roles'])['Roles']
        targets += [('role', role['RoleName']) for role in roles]
    if args.all or args.all_users:
        printInfo('Fetching all IAM users...')
        users = handle_truncated_response(iam_client.list_users, {}, ['Users'])['Users']
        targets += [('user', user['UserName']) for user in users]

    # Get all policies that apply to the targets and aggregate them into a single file
    printInfo('Fetching all inline and managed policies in use...')
    managed_policies = {}
    for resource_type, resource_name in targets:
        policy_documents = get_policies(iam_client, managed_policies,
                                        resource_type, resource_name)
        write_permissions(merge_policies(policy_documents),
                          resource_type, resource_name)

    # Get requested managed policies
    for policy_arn in args.policy_arn:
        policy_documents = [
            get_managed_policy_document(iam_client, policy_arn, managed_policies)
        ]
        write_permissions(merge_policies(policy_documents), 'policy', policy_arn)
def main():
    """
    Build an ip-ranges-<profile>.json file of public IP prefixes.

    Three mutually exclusive data sources per profile:
      - interactive mode (--interactive): prompt for CIDRs and attributes;
      - CSV mode (--csv-ip-ranges): parse CIDRs from one or more CSV files;
      - AWS mode (default): enumerate public IPs of EC2 instances and
        Elastic IPs across the selected regions.

    :return: 42 on error, None on success
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('force')
    parser.add_argument('dry-run')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--interactive',
                               dest='interactive',
                               default=False,
                               action='store_true',
                               help='Interactive prompt to manually enter CIDRs.')
    parser.parser.add_argument('--csv-ip-ranges',
                               dest='csv_ip_ranges',
                               default=[],
                               nargs='+',
                               help='CSV file(s) containing CIDRs information.')
    parser.parser.add_argument('--skip-first-line',
                               dest='skip_first_line',
                               default=False,
                               action='store_true',
                               help='Skip first line when parsing CSV file.')
    parser.parser.add_argument('--attributes',
                               dest='attributes',
                               default=[],
                               nargs='+',
                               help='Name of the attributes to enter for each CIDR.')
    parser.parser.add_argument('--mappings',
                               dest='mappings',
                               default=[],
                               nargs='+',
                               help='Column number matching attributes when headers differ.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Initialize the list of regions to work with
    regions = build_region_list('ec2', args.regions, args.partition_name)

    # For each profile/environment...
    for profile_name in args.profile:
        # Interactive mode
        if args.interactive:
            # Initalize prefixes
            attributes = args.attributes
            filename = 'ip-ranges-%s.json' % profile_name
            if os.path.isfile(filename):
                printInfo('Loading existing IP ranges from %s' % filename)
                prefixes = read_ip_ranges(filename)
                # Initialize attributes from existing values
                if attributes == []:
                    for prefix in prefixes:
                        for key in prefix:
                            if key not in attributes:
                                attributes.append(key)
            else:
                prefixes = []
            # IP prefix does not need to be specified as an attribute
            attributes = [a for a in attributes if a != 'ip_prefix']
            # Prompt for new entries
            while prompt_4_yes_no('Add a new IP prefix to the ip ranges'):
                ip_prefix = prompt_4_value('Enter the new IP prefix:')
                obj = {}
                for a in attributes:
                    obj[a] = prompt_4_value('Enter the \'%s\' value:' % a)
                prefixes.append(new_prefix(ip_prefix, obj))
        # Support loading from CSV file
        elif len(args.csv_ip_ranges) > 0:
            # Initalize prefixes
            prefixes = []
            # Load CSV file contents
            for filename in args.csv_ip_ranges:
                with open(filename, 'rt') as f:
                    csv_contents = f.readlines()
                # Initialize mappings (attribute name -> column index)
                attributes = args.attributes
                mappings = {}
                if attributes == []:
                    # Follow structure of first line
                    headers = csv_contents.pop(0).strip().split(',')
                    for index, attribute in enumerate(headers):
                        mappings[attribute] = index
                elif attributes and args.mappings == []:
                    # Follow structure of first line but only map a subset of fields
                    headers = csv_contents.pop(0).strip().split(',')
                    attributes.append('ip_prefix')
                    for attribute in set(attributes):
                        mappings[attribute] = headers.index(attribute)
                else:
                    # Indices of columns are provided as an argument
                    for index, attribute in enumerate(attributes):
                        mappings[attribute] = int(args.mappings[index])
                if args.skip_first_line:
                    csv_contents.pop(0)
                # For each line...
                for line in csv_contents:
                    ip_prefix = {}
                    values = line.strip().split(',')
                    # Skip malformed rows that have fewer columns than mapped
                    if len(values) < len(mappings):
                        continue
                    for attribute in mappings:
                        ip_prefix[attribute] = values[mappings[attribute]]
                    # Combine separate ip + mask columns into a single CIDR
                    if 'ip_prefix' in mappings and 'mask' in mappings:
                        ip = ip_prefix.pop('ip_prefix')
                        mask = ip_prefix.pop('mask')
                        ip_prefix['ip_prefix'] = '%s/%s' % (ip, mask.replace('/',''))
                    prefixes.append(ip_prefix)
        # AWS mode
        else:
            # Initialize IP addresses
            printInfo('Fetching public IP information for the \'%s\' environment...' % profile_name)
            ip_addresses = {}
            # Search for AWS credentials
            credentials = read_creds(profile_name)
            if not credentials['AccessKeyId']:
                return 42
            # For each region...
            for region in regions:
                # Connect to EC2
                ec2_client = connect_service('ec2', credentials, region)
                if not ec2_client:
                    continue
                # Get public IP addresses associated with EC2 instances
                printInfo('...in %s: EC2 instances' % region)
                reservations = handle_truncated_response(ec2_client.describe_instances, {}, ['Reservations'])
                for reservation in reservations['Reservations']:
                    for i in reservation['Instances']:
                        if 'PublicIpAddress' in i:
                            ip_addresses[i['PublicIpAddress']] = new_ip_info(region, i['InstanceId'], False)
                            get_name(i, ip_addresses[i['PublicIpAddress']], 'InstanceId')
                        if 'NetworkInterfaces' in i:
                            for eni in i['NetworkInterfaces']:
                                if 'Association' in eni:
                                    # At that point, we don't know whether it's an EIP or not...
                                    ip_addresses[eni['Association']['PublicIp']] = new_ip_info(region, i['InstanceId'], False)
                                    get_name(i, ip_addresses[eni['Association']['PublicIp']], 'InstanceId')
                # Get all EIPs (to handle unassigned cases)
                printInfo('...in %s: Elastic IP addresses' % region)
                eips = handle_truncated_response(ec2_client.describe_addresses, {}, ['Addresses'])
                for eip in eips['Addresses']:
                    instance_id = eip['InstanceId'] if 'InstanceId' in eip else None
                    # EC2-Classic non associated EIPs have an empty string for instance ID (instead of lacking the attribute in VPC)
                    if instance_id == '':
                        instance_id = None
                    ip_addresses[eip['PublicIp']] = new_ip_info(region, instance_id, True)
                    ip_addresses[eip['PublicIp']]['name'] = instance_id
            # Format
            prefixes = []
            for ip in ip_addresses:
                prefixes.append(new_prefix(ip, ip_addresses[ip]))
        # Generate an ip-ranges-<profile>.json file
        # NOTE(review): assumes opinel maps the 'force' argument to
        # dest='force_write' — confirm against OpinelArgumentParser.
        save_ip_ranges(profile_name, prefixes, args.force_write, args.debug)
def main(): # Parse arguments parser = OpinelArgumentParser() parser.add_argument('debug') parser.add_argument('profile') parser.add_argument('regions') parser.add_argument('partition-name') parser.parser.add_argument('--filters', dest='filters', default=None, help='') args = parser.parse_args() # Configure the debug level configPrintException(args.debug) # Check version of opinel if not check_requirements(os.path.realpath(__file__)): return 42 # Get profile name profile_name = args.profile[0] # Build list of region regions = build_region_list('ec2', args.regions, args.partition_name) printInfo(str(regions)) # Build filters filters = json.loads(args.filters) if args.filters else None # Search for AWS credentials credentials = read_creds(profile_name) if not credentials['AccessKeyId']: return 42 # List all EC2 instances instances = [] for region in regions: printInfo('Fetching instances in %s...' % region) ec2_client = connect_service('ec2', credentials, region_name=region) args = {'Filters': filters} if filters else {} reservations = handle_truncated_response( ec2_client.describe_instances, args, ['Reservations'])['Reservations'] for r in reservations: instances += r['Instances'] printInfo(' Found %d instances' % len(instances)) # Build list of private and public IPs prvips = {} pubips = {} for i in instances: security_groups = i['SecurityGroups'] for eni in i['NetworkInterfaces']: for prvip in eni['PrivateIpAddresses']: prvips[prvip['PrivateIpAddress']] = { 'security_groups': security_groups } if 'Association' in prvip: pubips[prvip['Association']['PublicIp']] = { 'security_groups': security_groups } # Create target files with open('targets-%s-prv.txt' % profile_name, 'wt') as f: for prvip in prvips: f.write('%s\n' % prvip) with open('targets-%s-pub.txt' % profile_name, 'wt') as f: for pubip in pubips: f.write('%s\n' % pubip)
def main():
    """
    Download and decompress CloudTrail logs for a date range.

    For every region's trails, locates the receiving S3 bucket, downloads
    the log objects for each day between --from and --to (inclusive) with
    a thread pool, then gunzips everything under the download folder.

    :return: 42 on error, None on success
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.add_argument('bucket-name')
    parser.parser.add_argument('--aws-account-id',
                               dest='aws_account_id',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--from',
                               dest='from_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--to',
                               dest='to_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    profile_name = args.profile[0]
    # Parse and validate the date range (YYYY/MM/DD, inclusive)
    try:
        from_date = datetime.datetime.strptime(args.from_date[0], "%Y/%m/%d").date()
        to_date = datetime.datetime.strptime(args.to_date[0], "%Y/%m/%d").date()
        delta = to_date - from_date
    except Exception as e:
        printException(e)
        printError('Error: dates must be formatted of the following format YYYY/MM/DD')
        return 42
    if delta.days < 0:
        printError('Error: your \'to\' date is earlier than your \'from\' date')
        return 42

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Fetch AWS account ID
    if not args.aws_account_id[0]:
        printInfo('Fetching the AWS account ID...')
        aws_account_id = get_aws_account_id(credentials)
    else:
        aws_account_id = args.aws_account_id[0]
    # Substitute the account id into the module-level log path template
    # (mutates the global for later REGION substitution below)
    global cloudtrail_log_path
    cloudtrail_log_path = cloudtrail_log_path.replace('AWS_ACCOUNT_ID', aws_account_id)

    # Create download dir
    if not os.path.exists(download_folder):
        os.makedirs(download_folder)

    # Iterate through regions
    s3_clients = {}  # cache of S3 clients keyed by region
    for region in build_region_list('cloudtrail', args.regions, args.partition_name):

        # Connect to CloudTrail
        cloudtrail_client = connect_service('cloudtrail', credentials, region)
        if not cloudtrail_client:
            continue

        # Get information about the S3 bucket that receives CloudTrail logs
        trails = cloudtrail_client.describe_trails()
        for trail in trails['trailList']:
            bucket_name = trail['S3BucketName']
            prefix = trail['S3KeyPrefix'] if 'S3KeyPrefix' in trail else ''

            # Connect to S3 (re-use cached clients; the bucket may live in
            # a different region than the trail)
            manage_dictionary(s3_clients, region, connect_service('s3', credentials, region))
            target_bucket_region = get_s3_bucket_location(s3_clients[region], bucket_name)
            manage_dictionary(s3_clients, target_bucket_region, connect_service('s3', credentials, target_bucket_region))
            s3_client = s3_clients[target_bucket_region]

            # Generate base path for files
            log_path = os.path.join(prefix, cloudtrail_log_path.replace('REGION', region))

            # Download files
            printInfo('Downloading log files in %s... ' % region, False)
            keys = []
            for i in range(delta.days + 1):
                day = from_date + timedelta(days=i)
                folder_path = os.path.join(log_path, day.strftime("%Y/%m/%d"))
                try:
                    objects = handle_truncated_response(s3_client.list_objects, {'Bucket': bucket_name, 'Prefix': folder_path}, ['Contents'])
                    for o in objects['Contents']:
                        keys.append([o['Key'], 0])
                except Exception as e:
                    # Best-effort: a day without logs (no 'Contents') is
                    # reported but does not abort the download
                    printException(e)
                    pass
            thread_work(keys, download_object, params = {'Bucket': bucket_name, 'S3Client': s3_client}, num_threads = 100)
            printInfo('Done')

    # Iterate through files and gunzip 'em
    printInfo('Decompressing files...')
    gzlogs = []
    for root, dirnames, filenames in os.walk(download_folder):
        for filename in filenames:
            gzlogs.append(filename)
    thread_work(gzlogs, gunzip_file, num_threads = 30)