def get_s3_acls(api_client, bucket_name, bucket, key_name = None):
    """
    Fetch the ACL grants of an S3 bucket (or of a single object when key_name
    is given) and normalize them into a dictionary keyed by grantee.

    :param api_client:  Low-level S3 API client (boto3-style)
    :param bucket_name: Name of the bucket
    :param bucket:      Bucket configuration dictionary (not read here)
    :param key_name:    Optional object key; when set, the object ACL is
                        fetched instead of the bucket ACL
    :return:            Dictionary of grantees (display name, optional URI,
                        normalized permissions); empty dict on failure
    """
    try:
        grantees = {}
        if key_name:
            grants = api_client.get_object_acl(Bucket = bucket_name, Key = key_name)
        else:
            grants = api_client.get_bucket_acl(Bucket = bucket_name)
        for grant in grants['Grants']:
            if 'ID' in grant['Grantee']:
                # Canonical user: keyed by ID; fall back to the ID when there is no display name
                grantee = grant['Grantee']['ID']
                display_name = grant['Grantee']['DisplayName'] if 'DisplayName' in grant['Grantee'] else grant['Grantee']['ID']
            elif 'URI' in grant['Grantee']:
                # Predefined group: keyed by the last element of the group URI
                grantee = grant['Grantee']['URI'].split('/')[-1]
                display_name = s3_group_to_string(grant['Grantee']['URI'])
            else:
                grantee = display_name = 'Unknown'
            permission = grant['Permission']
            manage_dictionary(grantees, grantee, {})
            grantees[grantee]['DisplayName'] = display_name
            if 'URI' in grant['Grantee']:
                grantees[grantee]['URI'] = grant['Grantee']['URI']
            # Accumulate permissions: a grantee may appear in several grants
            manage_dictionary(grantees[grantee], 'permissions', init_s3_permissions())
            set_s3_permissions(grantees[grantee]['permissions'], permission)
        return grantees
    except Exception as e:
        # Best effort: report the error and return an empty ACL dictionary
        printError('Failed to get ACL configuration for %s: %s' % (bucket_name, e))
        return {}
def fetch_credential_report(self, credentials, ignore_exception = False):
    """
    Fetch the IAM credential report and index it by user name.

    :param credentials:      AWS credentials used to build the IAM API client
    :param ignore_exception: When True, stay silent on errors — the report is
                             generated asynchronously and may not be ready yet
    :type ignore_exception:  bool
    """
    iam_report = {}
    try:
        api_client = connect_service('iam', credentials, silent = True)
        response = api_client.generate_credential_report()
        if response['State'] != 'COMPLETE':
            # Report generation still in progress
            if not ignore_exception:
                printError('Failed to generate a credential report.')
            return
        # The report content is CSV bytes: first line holds the column names
        report = api_client.get_credential_report()['Content']
        lines = report.splitlines()
        keys = lines[0].decode('utf-8').split(',')
        for line in lines[1:]:
            values = line.decode('utf-8').split(',')
            # values[0] is the user name, used as the per-user report key
            manage_dictionary(iam_report, values[0], {})
            for key, value in zip(keys, values):
                iam_report[values[0]][key] = value
        self.credential_report = iam_report
        self.fetchstatuslogger.counts['credential_report']['fetched'] = 1
    except Exception as e:
        if ignore_exception:
            return
        printError('Failed to download a credential report.')
        printException(e)
def parse_snapshot(self, global_params, region, dbs):
    """
    Parse a single RDS snapshot and store it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param dbs: Snapshot
    """
    # Snapshots outside a VPC are filed under the EC2-classic pseudo VPC
    vpc_id = dbs['VpcId'] if 'VpcId' in dbs else ec2_classic
    snapshot_id = dbs.pop('DBSnapshotIdentifier')
    snapshot = {'arn': dbs.pop('DBSnapshotArn'), 'id': snapshot_id, 'name': snapshot_id, 'vpc_id': vpc_id}
    attributes = [
        'DBInstanceIdentifier',
        'SnapshotCreateTime',
        'Encrypted',
        'OptionGroupName'
    ]
    # Missing attributes are normalized to None
    for attribute in attributes:
        snapshot[attribute] = dbs[attribute] if attribute in dbs else None
    # Sharing/visibility attributes require an extra API call
    api_client = api_clients[region]
    attributes = api_client.describe_db_snapshot_attributes(DBSnapshotIdentifier = snapshot_id)['DBSnapshotAttributesResult']
    snapshot['attributes'] = attributes['DBSnapshotAttributes'] if 'DBSnapshotAttributes' in attributes else {}
    # Save
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].snapshots[snapshot_id] = snapshot
def process_vpc_peering_connections_callback(aws_config, current_config, path, current_path, pc_id, callback_args):
    """
    Create a list of peering connection IDs in each VPC

    :param aws_config:     Full configuration dictionary
    :param current_config: Peering connection configuration
    :param path:
    :param current_path:   Path to the current peering connection
    :param pc_id:          Peering connection ID
    :param callback_args:
    :return:
    """
    # This account owns one of the two sides; the other side is the peer
    info = 'AccepterVpcInfo' if current_config['AccepterVpcInfo']['OwnerId'] == aws_config['aws_account_id'] else 'RequesterVpcInfo'
    region = current_path[current_path.index('regions')+1]
    vpc_id = current_config[info]['VpcId']
    target = aws_config['services']['vpc']['regions'][region]['vpcs'][vpc_id]
    manage_dictionary(target, 'peering_connections', [])
    if pc_id not in target['peering_connections']:
        target['peering_connections'].append(pc_id)
    # VPC information for the peer'd VPC
    current_config['peer_info'] = copy.deepcopy(current_config['AccepterVpcInfo' if info == 'RequesterVpcInfo' else 'RequesterVpcInfo'])
    if 'PeeringOptions' in current_config['peer_info']:
        current_config['peer_info'].pop('PeeringOptions')
    # Resolve the peer account's name when it belongs to the known organization
    if 'organization' in aws_config and current_config['peer_info']['OwnerId'] in aws_config['organization']:
        current_config['peer_info']['name'] = aws_config['organization'][current_config['peer_info']['OwnerId']]['Name']
    else:
        current_config['peer_info']['name'] = current_config['peer_info']['OwnerId']
def match_instances_and_roles(aws_config):
    """
    Cross-reference EC2 instances with IAM roles via instance profiles, and
    store the matched instance IDs and counts on each role.

    :param aws_config: Full configuration dictionary
    """
    printInfo('Matching EC2 instances and IAM roles...')
    ec2_config = aws_config['services']['ec2']
    iam_config = aws_config['services']['iam']
    # Map each instance profile ID to the list of instance IDs using it
    role_instances = {}
    for region in ec2_config['regions'].values():
        for vpc in region['vpcs'].values():
            if 'instances' not in vpc:
                continue
            for instance_id, instance in vpc['instances'].items():
                profile = instance['IamInstanceProfile']
                profile_id = profile['Id'] if profile else None
                if not profile_id:
                    continue
                manage_dictionary(role_instances, profile_id, [])
                role_instances[profile_id].append(instance_id)
    # Propagate the matches onto the roles
    for role in iam_config['roles'].values():
        role['instances_count'] = 0
        for profile_id in role['instance_profiles']:
            if profile_id not in role_instances:
                continue
            matched = role_instances[profile_id]
            role['instance_profiles'][profile_id]['instances'] = matched
            role['instances_count'] += len(matched)
def __get_inline_policies(self, api_client, iam_resource_type, resource_id, resource_name):
    """
    Fetch and parse the inline policies attached to one IAM resource.

    :param api_client:        IAM API client
    :param iam_resource_type: 'user', 'group' or 'role' (singular form)
    :param resource_id:       ID of the resource the policies belong to
    :param resource_name:     Name of the resource, used in the API calls
    :return:                  Dictionary of policies keyed by generated policy ID
    """
    fetched_policies = {}
    # Resolve the type-specific API methods, e.g. get_role_policy / list_role_policies
    get_policy_method = getattr(api_client, 'get_' + iam_resource_type + '_policy')
    list_policy_method = getattr(api_client, 'list_' + iam_resource_type + '_policies')
    args = {}
    args[iam_resource_type.title() + 'Name'] = resource_name
    try:
        policy_names = list_policy_method(**args)['PolicyNames']
    except Exception as e:
        # Throttling errors are re-raised so the caller can resume later
        if is_throttled(e):
            raise e
        else:
            printException(e)
            return fetched_policies
    try:
        for policy_name in policy_names:
            args['PolicyName'] = policy_name
            policy_document = get_policy_method(**args)['PolicyDocument']
            policy_id = self.get_non_aws_id(policy_name)
            manage_dictionary(fetched_policies, policy_id, {})
            fetched_policies[policy_id]['PolicyDocument'] = policy_document
            fetched_policies[policy_id]['name'] = policy_name
            # Feed this policy's statements into the global permissions map
            self.__parse_permissions(policy_id, policy_document, 'inline_policies', iam_resource_type + 's', resource_id)
    except Exception as e:
        if is_throttled(e):
            raise e
        else:
            printException(e)
    return fetched_policies
def parse_roles(self, fetched_role, params):
    """
    Parse a single IAM role and fetch additional data

    :param fetched_role: Raw role dictionary as returned by the IAM API
    :param params:       Dictionary holding the shared 'api_client'
    """
    role = {}
    # Filled in later when EC2 instances are matched against roles
    role['instances_count'] = 'N/A'
    # When resuming upon throttling error, skip if already fetched
    if fetched_role['RoleName'] in self.roles:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    role['id'] = fetched_role.pop('RoleId')
    role['name'] = fetched_role.pop('RoleName')
    role['arn'] = fetched_role.pop('Arn')
    # Get other attributes
    get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
    # Get role policies
    policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
    if len(policies):
        role['inline_policies'] = policies
    role['inline_policies_count'] = len(policies)
    # Get instance profiles
    profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
    manage_dictionary(role, 'instance_profiles', {})
    for profile in profiles['InstanceProfiles']:
        manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
        role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
        role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
    # Get trust relationship
    role['assume_role_policy'] = {}
    role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
    # Save role
    self.roles[role['id']] = role
def parse_instance(self, global_params, region, dbi):
    """
    Parse a single RDS instance

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param dbi: Raw DB instance dictionary as returned by the RDS API
    """
    # Instances outside a VPC are filed under the EC2-classic pseudo VPC
    vpc_id = dbi['DBSubnetGroup']['VpcId'] if 'DBSubnetGroup' in dbi and 'VpcId' in dbi['DBSubnetGroup'] and dbi['DBSubnetGroup']['VpcId'] else ec2_classic
    instance = {}
    instance['name'] = dbi.pop('DBInstanceIdentifier')
    # Copy the attributes of interest; missing attributes are normalized to None.
    # Fix: 'StorageEncrypted' was listed twice in the original attribute list.
    for key in ['InstanceCreateTime', 'Engine', 'DBInstanceStatus', 'AutoMinorVersionUpgrade',
                'DBInstanceClass', 'MultiAZ', 'Endpoint', 'BackupRetentionPeriod',
                'PubliclyAccessible', 'StorageEncrypted', 'VpcSecurityGroups',
                'DBSecurityGroups', 'DBParameterGroups', 'EnhancedMonitoringResourceArn']:
        instance[key] = dbi[key] if key in dbi else None
    # If part of a cluster, multi AZ information is only available via cluster information
    if 'DBClusterIdentifier' in dbi:
        api_client = api_clients[region]
        cluster = api_client.describe_db_clusters(DBClusterIdentifier = dbi['DBClusterIdentifier'])['DBClusters'][0]
        instance['MultiAZ'] = cluster['MultiAZ']
    # Save
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].instances[instance['name']] = instance
def match_instances_and_subnets_callback(aws_config, current_config, path, current_path, instance_id, callback_args):
    """Record this EC2 instance's ID on the subnet it lives in (no duplicates)."""
    subnet_id = current_config['SubnetId']
    # subnet_map resolves the subnet's region and parent VPC
    location = subnet_map[subnet_id]
    region_vpcs = aws_config['services']['vpc']['regions'][location['region']]['vpcs']
    subnet = region_vpcs[location['vpc_id']]['subnets'][subnet_id]
    manage_dictionary(subnet, 'instances', [])
    if instance_id not in subnet['instances']:
        subnet['instances'].append(instance_id)
def parse_route_table(self, global_params, region, rt):
    """
    Parse a single route table and store it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param rt:            Raw route table dictionary
    """
    vpc_id = rt['VpcId']
    parsed = {}
    # TODO: change get_name to have src then dst
    get_name(rt, parsed, 'VpcId')
    get_keys(rt, parsed, ['Routes', 'Associations', 'PropagatingVgws'])
    # Save under the owning VPC, creating its container if needed
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].route_tables[rt['RouteTableId']] = parsed
def __init__(self, ruleset):
    """Index the ruleset's enabled rules by the configuration path they apply to."""
    self.ruleset = ruleset
    self.rules = {}
    # Organize rules by path; disabled rules are ignored
    for file_rules in self.ruleset.rules.values():
        for rule in file_rules:
            if rule.enabled:
                manage_dictionary(self.rules, rule.path, [])
                self.rules[rule.path].append(rule)
def list_ec2_network_attack_surface_callback(aws_config, current_config, path, current_path, privateip_id, callback_args):
    """Map each public address (v4 association or v6) of this network interface onto the EC2 external attack surface."""
    ec2_config = aws_config['services']['ec2']
    manage_dictionary(ec2_config, 'external_attack_surface', {})
    attack_surface = ec2_config['external_attack_surface']
    # Public IPv4 address, present only when the interface has an association
    if current_config.get('Association'):
        public_ip = current_config['Association']['PublicIp']
        security_group_to_attack_surface(aws_config, attack_surface, public_ip, current_path,
                                         [g['GroupId'] for g in current_config['Groups']], [])
    # IPv6
    for ipv6 in current_config.get('Ipv6Addresses', []):
        security_group_to_attack_surface(aws_config, attack_surface, ipv6['Ipv6Address'], current_path,
                                         [g['GroupId'] for g in current_config['Groups']], [])
def list_instances_in_security_groups(region_info):
    """
    Associate each EC2 instance of a region with the security groups it
    belongs to, grouped by instance state.

    :param region_info: Region configuration holding 'vpcs' with instances
                        and security groups
    """
    for vpc in region_info['vpcs']:
        # Bug fix: the original used 'return' here, which aborted processing
        # of all remaining VPCs as soon as one VPC had no instances.
        if 'instances' not in region_info['vpcs'][vpc]:
            continue
        for instance in region_info['vpcs'][vpc]['instances']:
            state = region_info['vpcs'][vpc]['instances'][instance]['State']['Name']
            for sg in region_info['vpcs'][vpc]['instances'][instance]['security_groups']:
                sg_id = sg['GroupId']
                # Group member instances by state under each security group
                manage_dictionary(region_info['vpcs'][vpc]['security_groups'][sg_id], 'instances', {})
                manage_dictionary(region_info['vpcs'][vpc]['security_groups'][sg_id]['instances'], state, [])
                region_info['vpcs'][vpc]['security_groups'][sg_id]['instances'][state].append(instance)
def store_target(self, global_params, region, target):
    """
    Store a parsed resource in the appropriate container: resources carrying a
    VpcId are filed under their VPC's configuration, the rest directly on this
    service configuration object.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param target:        Resource dictionary; must hold 'scout2_target_type'
    """
    # The fetcher tagged each resource with the container attribute it goes into
    target_type = target.pop('scout2_target_type')
    if 'VpcId' in target:
        vpc_id = target.pop('VpcId')
        manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
        tmp = getattr(self, 'vpcs')[vpc_id]
        target_dict = getattr(tmp, target_type)
    else:
        target_dict = getattr(self, target_type)
    # resource_id_map gives the ID attribute name for each target type
    target_id = target[resource_id_map[target_type]]
    get_name(target, target, resource_id_map[target_type])
    target_dict[target_id] = target
def parse_vpc(self, global_params, region_name, vpc):
    """
    Parse a single VPC and register it in the service configuration.

    :param global_params: Parameters shared for all regions
    :param region_name:   Name of the AWS region
    :param vpc:           Raw VPC dictionary
    """
    vpc_id = vpc['VpcId']
    # Create the VPC container if it does not exist yet, then set its name
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    vpc_name = get_name(vpc, {}, 'VpcId')
    self.vpcs[vpc_id].name = vpc_name
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single Redshift cluster and store it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param cluster:       Cluster dictionary
    """
    # Clusters without a VPC ID are filed under the EC2-classic pseudo VPC
    if 'VpcId' in cluster:
        vpc_id = cluster.pop('VpcId')
    else:
        vpc_id = ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    cluster_name = cluster.pop('ClusterIdentifier')
    cluster['name'] = cluster_name
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
def get_db_attack_surface(aws_config, current_config, path, current_path, db_id, callback_args):
    """
    Add a publicly reachable database (RDS/Redshift/ElastiCache) to its
    service's external attack surface.

    :param aws_config:     Full configuration dictionary
    :param current_config: Database/cluster configuration
    :param path:
    :param current_path:   Path to the resource; element 1 is the service name
    :param db_id:          Database/cluster ID
    :param callback_args:
    """
    service = current_path[1]
    service_config = aws_config['services'][service]
    manage_dictionary(service_config, 'external_attack_surface', {})
    if (service == 'redshift' or service == 'rds') and 'PubliclyAccessible' in current_config and current_config['PubliclyAccessible']:
        public_dns = current_config['Endpoint']['Address']
        listeners = [ current_config['Endpoint']['Port'] ]
        security_groups = current_config['VpcSecurityGroups']
        security_group_to_attack_surface(aws_config, service_config['external_attack_surface'], public_dns, current_path, [g['VpcSecurityGroupId'] for g in security_groups], listeners)
    elif 'ConfigurationEndpoint' in current_config:
        # ElastiCache: derive a node DNS name from the configuration endpoint
        public_dns = current_config['ConfigurationEndpoint']['Address'].replace('.cfg', '') # TODO : get the proper addresss
        listeners = [ current_config['ConfigurationEndpoint']['Port'] ]
        security_groups = current_config['SecurityGroups']
        security_group_to_attack_surface(aws_config, service_config['external_attack_surface'], public_dns, current_path, [g['SecurityGroupId'] for g in security_groups], listeners)
def __parse_actions(self, effect, action_string, actions, resource_string, resources, iam_resource_type, iam_resource_name, policy_name, policy_type, condition):
    """Register each action in the nested permissions tree, then recurse into per-action parsing."""
    for action in actions:
        # Build the nested dictionary one level at a time
        node = self.permissions[action_string]
        manage_dictionary(node, action, {})
        manage_dictionary(node[action], iam_resource_type, {})
        manage_dictionary(node[action][iam_resource_type], effect, {})
        manage_dictionary(node[action][iam_resource_type][effect], iam_resource_name, {})
        self.__parse_action(effect, action_string, action, resource_string, resources, iam_resource_type, iam_resource_name, policy_name, policy_type, condition)
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single EMR cluster

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: EMR cluster
    """
    cluster_id = cluster['Id']
    # The list API only returns a summary; fetch the full cluster description
    cluster = api_clients[region].describe_cluster(ClusterId = cluster_id)['Cluster']
    cluster['id'] = cluster.pop('Id')
    cluster['name'] = cluster.pop('Name')
    vpc_id = 'TODO' # The EMR API won't disclose the VPC ID, so wait until all configs have been fetch and look up the VPC based on the subnet ID
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].clusters[cluster_id] = cluster
def finalize(self):
    """
    Post-processing: propagate managed policies onto the IAM entities they are
    attached to and feed their statements into the permissions map.
    """
    # Update permissions for managed policies
    for policy_id in self.policies:
        if 'attached_to' in self.policies[policy_id] and len(self.policies[policy_id]['attached_to']) > 0:
            # Policy attached to at least one user/group/role
            for entity_type in self.policies[policy_id]['attached_to']:
                for entity in self.policies[policy_id]['attached_to'][entity_type]:
                    entity['id'] = self.get_id_for_resource(entity_type, entity['name'])
                    entities = getattr(self, entity_type)
                    manage_dictionary(entities[entity['id']], 'policies', [])
                    manage_dictionary(entities[entity['id']], 'policies_counts', 0)
                    entities[entity['id']]['policies'].append(policy_id)
                    entities[entity['id']]['policies_counts'] += 1
                    self.__parse_permissions(policy_id, self.policies[policy_id]['PolicyDocument'], 'policies', entity_type, entity['id'])
        else:
            # Unattached policy: parse its permissions without tying them to an entity
            self.__parse_permissions(policy_id, self.policies[policy_id]['PolicyDocument'], 'policies', None, None)
    super(IAMConfig, self).finalize()
def get_security_groups_info(rds_client, region_info):
    """Fetch the region's RDS DB security groups (EC2-classic) and store them under the pseudo-VPC."""
    groups = rds_client.describe_db_security_groups()['DBSecurityGroups']
    # Make sure the EC2-classic containers and the counter exist
    manage_dictionary(region_info, 'vpcs', {})
    manage_dictionary(region_info['vpcs'], ec2_classic, {})
    manage_dictionary(region_info['vpcs'][ec2_classic], 'security_groups', {})
    manage_dictionary(region_info, 'security_groups_count', 0)
    region_info['security_groups_count'] += len(groups)
    classic_groups = region_info['vpcs'][ec2_classic]['security_groups']
    for group in groups:
        classic_groups[group['DBSecurityGroupName']] = parse_security_group(group)
def __init__(self, targets, add_regions = False):
    """
    Initialize the fetch-status logger.

    :param targets:     Iterable of target types (strings, or tuples whose
                        first element is the target type)
    :param add_regions: When True, prepend a 'regions' column
    """
    self.targets = []
    # Leading '\r' lets each refresh overwrite the previous status line
    self.formatted_string = '\r '
    self.counts = {}
    target_names = ()
    if add_regions:
        targets = ('regions',) + targets
    for target in targets:
        target_type = target[0] if type(target) == tuple else target
        self.targets.append(target_type)
        manage_dictionary(self.counts, target_type, {'discovered': 0, 'fetched': 0})
        # h4ck for credential report....
        if target_type == 'credential_report':
            self.counts[target_type]['discovered'] = 1
        target_names += (target_type,)
        # One fixed-width column per target
        self.formatted_string += ' %18s'
    self.__out(target_names, True)
def __parse_statement(self, policy_name, statement, policy_type, iam_resource_type, resource_name):
    """
    Parse a single statement of an IAM policy document and feed its actions
    into the permissions map.

    :param policy_name:       Name of the policy the statement belongs to
    :param statement:         Statement dictionary (Effect/Action/Resource/...)
    :param policy_type:       e.g. 'inline_policies' or 'policies'
    :param iam_resource_type: Type of the entity the policy applies to, or None
    :param resource_name:     Name of that entity
    """
    # Effect
    effect = str(statement['Effect'])
    # Action or NotAction; normalize a scalar into a one-element list
    action_string = 'Action' if 'Action' in statement else 'NotAction'
    if type(statement[action_string]) != list:
        statement[action_string] = [ statement[action_string] ]
    # Resource or NotResource; same normalization
    resource_string = 'Resource' if 'Resource' in statement else 'NotResource'
    if type(statement[resource_string]) != list:
        statement[resource_string] = [ statement[resource_string] ]
    # Condition
    condition = statement['Condition'] if 'Condition' in statement else None
    manage_dictionary(self.permissions, action_string, {})
    # Without a target entity there is nothing more to record
    if iam_resource_type == None:
        return
    self.__parse_actions(effect, action_string, statement[action_string], resource_string, statement[resource_string], iam_resource_type, resource_name, policy_name, policy_type, condition)
def parse_network_acl(self, global_params, region, network_acl):
    """
    Parse a single network ACL: split its raw entries into ingress/egress rule
    sets and store the ACL under its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param network_acl:   Raw network ACL dictionary
    """
    vpc_id = network_acl['VpcId']
    acl_id = network_acl.pop('NetworkAclId')
    network_acl['id'] = acl_id
    get_name(network_acl, network_acl, 'id')
    # Replace the raw 'Entries' with parsed ingress/egress rule dictionaries
    entries = network_acl.pop('Entries')
    manage_dictionary(network_acl, 'rules', {})
    network_acl['rules']['ingress'] = self.__parse_network_acl_entries(entries, False)
    network_acl['rules']['egress'] = self.__parse_network_acl_entries(entries, True)
    # Save
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].network_acls[acl_id] = network_acl
def parse_subnet(self, global_params, region, subnet):
    """
    Parse a single subnet, attach its covering flow logs, and store it under
    its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param subnet:        Raw subnet dictionary
    """
    vpc_id = subnet['VpcId']
    # Create the VPC container up front: the flow-log lookup below may rely on it.
    # (Fix: the original called manage_dictionary(self.vpcs, ...) twice.)
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    subnet_id = subnet['SubnetId']
    get_name(subnet, subnet, 'SubnetId')
    # Set flow logs that cover this subnet
    subnet['flow_logs'] = get_subnet_flow_logs_list(self, subnet)
    # Save
    self.vpcs[vpc_id].subnets[subnet_id] = subnet
def parse_security_group(self, global_params, region, group):
    """
    Parse a single security group

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param group: Security group
    """
    # Groups without a VPC ID are filed under the EC2-classic pseudo VPC
    vpc_id = group['VpcId'] if 'VpcId' in group and group['VpcId'] else ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    security_group = {}
    security_group['name'] = group['GroupName']
    security_group['id'] = group['GroupId']
    security_group['description'] = group['Description']
    security_group['owner_id'] = group['OwnerId']
    # Normalize ingress/egress rules into protocol/port dictionaries with grant counts
    security_group['rules'] = {'ingress': {}, 'egress': {}}
    security_group['rules']['ingress']['protocols'], security_group['rules']['ingress']['count'] = self.__parse_security_group_rules(group['IpPermissions'])
    security_group['rules']['egress']['protocols'], security_group['rules']['egress']['count'] = self.__parse_security_group_rules(group['IpPermissionsEgress'])
    self.vpcs[vpc_id].security_groups[group['GroupId']] = security_group
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single DB parameter group and fetch its modifiable parameters.

    :param global_params:   Parameters shared for all regions
    :param region:          Name of the AWS region
    :param parameter_group: Raw DB parameter group dictionary
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    group_name = parameter_group.pop('DBParameterGroupName')
    parameter_group['name'] = group_name
    api_client = api_clients[region]
    try:
        parameters = handle_truncated_response(api_client.describe_db_parameters,
                                               {'DBParameterGroupName': group_name},
                                               ['Parameters'])['Parameters']
        manage_dictionary(parameter_group, 'parameters', {})
        for parameter in parameters:
            # Discard non-modifiable parameters
            if parameter['IsModifiable']:
                parameter_name = parameter.pop('ParameterName')
                parameter_group['parameters'][parameter_name] = parameter
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % group_name)
    # Save
    parameter_group_id = self.get_non_aws_id(group_name)
    self.parameter_groups[parameter_group_id] = parameter_group
def __parse_security_group_rules(self, rules):
    """
    Normalize a list of raw security group rules (IpPermissions entries) into
    a dictionary keyed by protocol then port (single port or range).

    :param rules: List of rule dictionaries from the EC2 API
    :return:      Tuple (protocols dictionary, total number of grants)
    """
    protocols = {}
    rules_count = 0
    for rule in rules:
        ip_protocol = rule['IpProtocol'].upper()
        if ip_protocol == '-1':
            # '-1' means all protocols
            ip_protocol = 'ALL'
        protocols = manage_dictionary(protocols, ip_protocol, {})
        protocols[ip_protocol] = manage_dictionary(protocols[ip_protocol], 'ports', {})
        # Save the port (single port or range)
        port_value = 'N/A'
        if 'FromPort' in rule and 'ToPort' in rule:
            if ip_protocol == 'ICMP':
                # FromPort with ICMP is the type of message
                port_value = icmp_message_types_dict[str(rule['FromPort'])]
            elif rule['FromPort'] == rule['ToPort']:
                port_value = str(rule['FromPort'])
            else:
                port_value = '%s-%s' % (rule['FromPort'], rule['ToPort'])
        manage_dictionary(protocols[ip_protocol]['ports'], port_value, {})
        # Save grants, values are either a CIDR or an EC2 security group
        for grant in rule['UserIdGroupPairs']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'security_groups', [])
            protocols[ip_protocol]['ports'][port_value]['security_groups'].append(grant)
            rules_count = rules_count + 1
        for grant in rule['IpRanges']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'cidrs', [])
            protocols[ip_protocol]['ports'][port_value]['cidrs'].append({'CIDR': grant['CidrIp']})
            rules_count = rules_count + 1
        # IPv6
        for grant in rule['Ipv6Ranges']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'cidrs', [])
            protocols[ip_protocol]['ports'][port_value]['cidrs'].append({'CIDR': grant['CidrIpv6']})
            rules_count = rules_count + 1
    return protocols, rules_count
def security_group_to_attack_surface(aws_config, attack_surface_config, public_ip, current_path, security_groups, listeners = None):
    """
    Merge the ingress rules of the given security groups into the attack
    surface entry for public_ip.

    :param aws_config:            Full configuration dictionary
    :param attack_surface_config: Attack surface dictionary to update
    :param public_ip:             Public IP or DNS name of the resource
    :param current_path:          Path to the current resource
    :param security_groups:       IDs of the security groups protecting it
    :param listeners:             Ports the resource listens on; when empty,
                                  every CIDR-reachable rule is recorded
    """
    # Fix: avoid a mutable default argument
    listeners = [] if listeners is None else listeners
    manage_dictionary(attack_surface_config, public_ip, {'protocols': {}})
    for sg_id in security_groups:
        # Build the path to the EC2 security group's ingress rules
        sg_path = copy.deepcopy(current_path[0:6])
        sg_path[1] = 'ec2'
        sg_path.append('security_groups')
        sg_path.append(sg_id)
        sg_path.append('rules')
        sg_path.append('ingress')
        ingress_rules = get_object_at(aws_config, sg_path)
        if 'protocols' not in ingress_rules:
            continue
        for p in ingress_rules['protocols']:
            for port in ingress_rules['protocols'][p]['ports']:
                if len(listeners) == 0 and 'cidrs' in ingress_rules['protocols'][p]['ports'][port]:
                    # No known listeners: record every CIDR-reachable rule
                    manage_dictionary(attack_surface_config[public_ip]['protocols'], p, {'ports': {}})
                    manage_dictionary(attack_surface_config[public_ip]['protocols'][p]['ports'], port, {'cidrs': []})
                    attack_surface_config[public_ip]['protocols'][p]['ports'][port]['cidrs'] += \
                        ingress_rules['protocols'][p]['ports'][port]['cidrs']
                else:
                    # Only record rules that cover a port the resource listens on
                    ports = port.split('-')
                    if len(ports) > 1:
                        port_min = int(ports[0])
                        port_max = int(ports[1])
                    elif port.isdigit():
                        # Bug fix: the original kept the string value here, so the
                        # integer comparison below could never match a single-port rule
                        port_min = port_max = int(port)
                    else:
                        # Non-numeric port value (e.g. 'N/A' or an ICMP message type)
                        port_min = port_max = None
                    for listener in listeners:
                        listener = int(listener)
                        # Bug fix: boundaries are part of the range (was exclusive > / <)
                        if port_min is not None and port_min <= listener <= port_max and \
                                'cidrs' in ingress_rules['protocols'][p]['ports'][port]:
                            manage_dictionary(attack_surface_config[public_ip]['protocols'], p, {'ports': {}})
                            manage_dictionary(attack_surface_config[public_ip]['protocols'][p]['ports'], str(listener), {'cidrs': []})
                            attack_surface_config[public_ip]['protocols'][p]['ports'][str(listener)]['cidrs'] += \
                                ingress_rules['protocols'][p]['ports'][port]['cidrs']
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single ElastiCache cluster

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: ElastiCache cluster
    """
    cluster_name = cluster.pop('CacheClusterId')
    cluster['name'] = cluster_name
    # Must fetch info about the subnet group to retrieve the VPC ID...
    if 'CacheSubnetGroupName' in cluster:
        subnet_group = api_clients[region].describe_cache_subnet_groups(CacheSubnetGroupName = cluster['CacheSubnetGroupName'])['CacheSubnetGroups'][0]
        vpc_id = subnet_group['VpcId']
    else:
        # No subnet group: EC2-classic cluster
        vpc_id = ec2_classic
        subnet_group = None
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
    if subnet_group:
        self.vpcs[vpc_id].subnet_groups[subnet_group['CacheSubnetGroupName']] = subnet_group
def init_rules(self, services, ip_ranges, aws_account_id, generator=False):
    """
    Load the ruleset's rule definitions from their JSON files and index them
    by the configuration path they apply to.

    :param services:       List of services in scope
    :param ip_ranges:      Known IP ranges, substituted into rule definitions
    :param aws_account_id: Account ID, substituted into rule definitions
    :param generator:      When True, keep disabled rules (generator mode)
    """
    # Load rules from JSON files
    for rule_metadata in self.ruleset['rules']:
        # Skip disabled rules
        if 'enabled' in rule_metadata and rule_metadata['enabled'] in [ 'false', 'False', False ] and not generator:
            continue
        # Skip rules that apply to an out-of-scope service
        rule_details = self.load_json_rule(rule_metadata, ip_ranges, aws_account_id)
        if not rule_details:
            continue
        if 'enabled' in rule_metadata and rule_metadata['enabled']:
            rule_details['enabled'] = True
        # NOTE(review): skip_rule is computed but the skip itself is commented
        # out below, so out-of-scope rules are currently still loaded
        skip_rule = True
        for service in services:
            if rule_details['path'].startswith(service):
                skip_rule = False
        # if skip_rule:
        #     continues
        # Build the rules dictionary
        path = rule_details['path']
        if 'level' in rule_metadata:
            rule_details['level'] = rule_metadata['level']
        key = rule_details[ 'key'] if 'key' in rule_details else rule_metadata['filename']
        # Set condition operator
        if not 'condition_operator' in rule_details:
            rule_details['condition_operator'] = 'and'
        # Save details for rule; normalize the key (strip extension and spaces)
        key = key.replace('.json', '').replace(' ', '')
        manage_dictionary(self.rules, path, {})
        self.rules[path][key] = rule_details
def parse_elb(self, global_params, region, lb):
    """
    Parse a single classic load balancer and store it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param lb:            Raw load balancer dictionary from the ELB API
    """
    elb = {}
    elb['name'] = lb.pop('LoadBalancerName')
    # Load balancers outside a VPC are filed under the EC2-classic pseudo VPC
    vpc_id = lb['VPCId'] if 'VPCId' in lb and lb['VPCId'] else ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    get_keys(lb, elb, [ 'DNSName', 'CreatedTime', 'AvailabilityZones', 'Subnets', 'Scheme' ])
    elb['security_groups'] = []
    for sg in lb['SecurityGroups']:
        elb['security_groups'].append({'GroupId': sg})
    manage_dictionary(elb, 'listeners', {})
    # Collect listeners and the names of the policies not fetched yet
    policy_names = []
    for l in lb['ListenerDescriptions']:
        listener = l['Listener']
        manage_dictionary(listener, 'policies', [])
        for policy_name in l['PolicyNames']:
            policy_id = self.get_non_aws_id(policy_name)
            listener['policies'].append(policy_id)
            if policy_id not in self.elb_policies:
                policy_names.append(policy_name)
        elb['listeners'][l['Listener']['LoadBalancerPort']] = listener
    # Fetch LB policies here. This is not ideal, but the alternative is to download all policies and clean up after...
    if len(policy_names):
        policies = api_clients[region].describe_load_balancer_policies(
            LoadBalancerName=elb['name'], PolicyNames=policy_names)['PolicyDescriptions']
        for policy in policies:
            policy['name'] = policy.pop('PolicyName')
            policy_id = self.get_non_aws_id(policy['name'])
            self.elb_policies[policy_id] = policy
    manage_dictionary(elb, 'instances', [])
    for i in lb['Instances']:
        elb['instances'].append(i['InstanceId'])
    # Save, keyed by a generated (non-AWS) ID derived from the name
    self.vpcs[vpc_id].elbs[self.get_non_aws_id(elb['name'])] = elb
def __parse_resource(self, effect, action_string, action, resource_string, resource, iam_resource_type, iam_resource_name, policy_name, policy_type, condition):
    """Record a (resource, policy) leaf in the nested permissions tree, storing the statement's condition."""
    # Bind the already-existing branch once, then extend it level by level
    node = self.permissions[action_string][action][iam_resource_type][effect][iam_resource_name]
    manage_dictionary(node, resource_string, {})
    manage_dictionary(node[resource_string], resource, {})
    manage_dictionary(node[resource_string][resource], policy_type, {})
    manage_dictionary(node[resource_string][resource][policy_type], policy_name, {})
    node[resource_string][resource][policy_type][policy_name]['condition'] = condition
def list_ec2_network_attack_surface_callback(ec2_config, current_config, path, current_path, privateip_id, callback_args):
    """
    Add the public IP of this network interface (if any) to the EC2 attack
    surface, along with the CIDR grants of its security groups' ingress rules.

    :param ec2_config:     EC2 service configuration
    :param current_config: Private IP address configuration
    :param path:
    :param current_path:   Path to the current resource
    :param privateip_id:   Private IP ID
    :param callback_args:
    """
    if 'Association' in current_config and current_config['Association']:
        public_ip = current_config['Association']['PublicIp']
        manage_dictionary(ec2_config, 'attack_surface', {})
        manage_dictionary(ec2_config['attack_surface'], public_ip, {'protocols': {}})
        for sg_info in current_config['Groups']:
            sg_id = sg_info['GroupId']
            # Build the path to this security group's ingress rules
            sg_path = copy.deepcopy(current_path[0:4])
            sg_path.append('security_groups')
            sg_path.append(sg_id)
            sg_path.append('rules')
            sg_path.append('ingress')
            ingress_rules = get_object_at(ec2_config, sg_path)
            # (Removed the unused local 'public_ip_grants' from the original)
            for p in ingress_rules['protocols']:
                for port in ingress_rules['protocols'][p]['ports']:
                    if 'cidrs' in ingress_rules['protocols'][p]['ports'][port]:
                        manage_dictionary(ec2_config['attack_surface'][public_ip]['protocols'], p, {'ports': {}})
                        manage_dictionary(ec2_config['attack_surface'][public_ip]['protocols'][p]['ports'], port, {'cidrs': []})
                        ec2_config['attack_surface'][public_ip]['protocols'][p]['ports'][port]['cidrs'] += ingress_rules['protocols'][p]['ports'][port]['cidrs']
def __parse_security_group_rules(self, rules):
    """
    Normalize a list of raw security group rules (IpPermissions entries) into
    a dictionary keyed by protocol then port (single port or range).

    :param rules: List of rule dictionaries from the API
    :return:      Tuple (protocols dictionary, total number of grants)
    """
    protocols = {}
    rules_count = 0
    for rule in rules:
        ip_protocol = rule['IpProtocol'].upper()
        if ip_protocol == '-1':
            # '-1' means all protocols
            ip_protocol = 'ALL'
        protocols = manage_dictionary(protocols, ip_protocol, {})
        protocols[ip_protocol] = manage_dictionary(protocols[ip_protocol], 'ports', {})
        # Save the port (single port or range)
        port_value = 'N/A'
        if 'FromPort' in rule and 'ToPort' in rule:
            if ip_protocol == 'ICMP':
                # FromPort with ICMP is the type of message
                port_value = icmp_message_types_dict[str(rule['FromPort'])]
            elif rule['FromPort'] == rule['ToPort']:
                port_value = str(rule['FromPort'])
            else:
                port_value = '%s-%s' % (rule['FromPort'], rule['ToPort'])
        manage_dictionary(protocols[ip_protocol]['ports'], port_value, {})
        # Save grants, values are either a CIDR or an EC2 security group
        # NOTE(review): IPv6 ranges ('Ipv6Ranges') are not parsed here
        for grant in rule['UserIdGroupPairs']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'security_groups', [])
            protocols[ip_protocol]['ports'][port_value][
                'security_groups'].append(grant)
            rules_count = rules_count + 1
        for grant in rule['IpRanges']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'cidrs', [])
            protocols[ip_protocol]['ports'][port_value]['cidrs'].append(
                {'CIDR': grant['CidrIp']})
            rules_count = rules_count + 1
    return protocols, rules_count
def __update_iam_permissions(s3_info, bucket_name, iam_entity, allowed_iam_entity, policy_info):
    """Record that an IAM entity has policy-granted access to an S3 bucket; '*' fans out to every bucket."""
    if bucket_name == '*':
        # Wildcard: apply the permission to every known bucket
        for bucket in s3_info['buckets']:
            __update_iam_permissions(s3_info, bucket, iam_entity, allowed_iam_entity, policy_info)
    elif bucket_name in s3_info['buckets']:
        bucket = s3_info['buckets'][bucket_name]
        manage_dictionary(bucket, iam_entity, {})
        manage_dictionary(bucket, iam_entity + '_count', 0)
        if allowed_iam_entity not in bucket[iam_entity]:
            # First time this entity shows up for the bucket
            bucket[iam_entity][allowed_iam_entity] = {}
            bucket[iam_entity + '_count'] = bucket[iam_entity + '_count'] + 1
        # Copy over the granting policies (inline and managed)
        for policy_kind in ('inline_policies', 'policies'):
            if policy_kind in policy_info:
                manage_dictionary(bucket[iam_entity][allowed_iam_entity], policy_kind, {})
                bucket[iam_entity][allowed_iam_entity][policy_kind].update(policy_info[policy_kind])
    else:
        # Could be an error or cross-account access, ignore...
        pass
def sort_vpc_flow_logs_callback(self, current_config, path, current_path, flow_log_id, callback_args):
    """
    Attach a VPC flow log ID to the VPC or subnet resource it monitors.

    :param self:            Provider configuration object (exposes subnet_map)
    :param current_config:  Flow log configuration; must contain 'ResourceId'
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the flow log within the configuration tree
    :param flow_log_id:     ID of the flow log being processed
    :param callback_args:   Unused here; part of the generic callback signature
    """
    attached_resource = current_config['ResourceId']
    if attached_resource.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:4], ['vpcs', attached_resource])
        try:
            attached_vpc = get_object_at(self, vpc_path)
        except Exception as e:
            # The referenced VPC is absent from the fetched configuration
            printDebug(
                'It appears that the flow log %s is attached to a resource that was previously deleted (%s).' % (flow_log_id, attached_resource))
            return
        manage_dictionary(attached_vpc, 'flow_logs', [])
        if flow_log_id not in attached_vpc['flow_logs']:
            attached_vpc['flow_logs'].append(flow_log_id)
        # A VPC-level flow log also covers every subnet of that VPC
        for subnet_id in attached_vpc['subnets']:
            manage_dictionary(attached_vpc['subnets'][subnet_id], 'flow_logs', [])
            if flow_log_id not in attached_vpc['subnets'][subnet_id]['flow_logs']:
                attached_vpc['subnets'][subnet_id]['flow_logs'].append(flow_log_id)
    elif attached_resource.startswith('subnet-'):
        # Resolve the subnet's owning VPC via self.subnet_map to build its path
        # (assumes subnet_map entries carry a 'vpc_id' -- TODO confirm upstream)
        subnet_path = combine_paths(current_path[0:4], [
            'vpcs', self.subnet_map[attached_resource]['vpc_id'], 'subnets', attached_resource
        ])
        subnet = get_object_at(self, subnet_path)
        manage_dictionary(subnet, 'flow_logs', [])
        if flow_log_id not in subnet['flow_logs']:
            subnet['flow_logs'].append(flow_log_id)
        # TODO this is pre-merge (from Loic) code
        # all_vpcs = get_object_at(self, combine_paths(current_path[0:2], ['vpcs']))
        # for vpc in self.services['vpc']:
        #     if attached_resource in all_vpcs[vpc]['subnets']:
        #         manage_dictionary(all_vpcs[vpc]['subnets'][attached_resource], 'flow_logs', [])
        #         if flow_log_id not in all_vpcs[vpc]['subnets'][attached_resource]['flow_logs']:
        #             all_vpcs[vpc]['subnets'][attached_resource]['flow_logs'].append(flow_log_id)
        #         break
    else:
        printError('Resource %s attached to flow logs is not handled' % attached_resource)
def sort_vpc_flow_logs_callback(vpc_config, current_config, path, current_path, flow_log_id, callback_args):
    """
    Index a flow log under the VPC or subnet it is attached to.

    :param vpc_config:      VPC service configuration tree
    :param current_config:  Flow log configuration; must contain 'ResourceId'
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the flow log within the configuration tree
    :param flow_log_id:     ID of the flow log being processed
    :param callback_args:   Unused here; part of the generic callback signature
    """
    target = current_config['ResourceId']
    if target.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:2], ['vpcs', target])
        try:
            vpc = get_object_at(vpc_config, vpc_path)
        except Exception as e:
            # The referenced VPC no longer exists in the fetched data
            printDebug(
                'It appears that the flow log %s is attached to a resource that was previously deleted (%s).' % (flow_log_id, target))
            return
        manage_dictionary(vpc, 'flow_logs', [])
        if flow_log_id not in vpc['flow_logs']:
            vpc['flow_logs'].append(flow_log_id)
        # A VPC-level flow log also covers every subnet in the VPC
        for subnet_id in vpc['subnets']:
            subnet = vpc['subnets'][subnet_id]
            manage_dictionary(subnet, 'flow_logs', [])
            if flow_log_id not in subnet['flow_logs']:
                subnet['flow_logs'].append(flow_log_id)
    elif target.startswith('subnet-'):
        # Locate the owning VPC by scanning every VPC for this subnet ID
        all_vpcs = get_object_at(vpc_config, combine_paths(current_path[0:2], ['vpcs']))
        for vpc_id in all_vpcs:
            subnets = all_vpcs[vpc_id]['subnets']
            if target in subnets:
                manage_dictionary(subnets[target], 'flow_logs', [])
                if flow_log_id not in subnets[target]['flow_logs']:
                    subnets[target]['flow_logs'].append(flow_log_id)
                break
    else:
        printError('Resource %s attached to flow logs is not handled' % target)
def manage_vpc(vpc_info, vpc_id):
    """
    Ensure a VPC entry exists in the given dictionary, initializing its 'id'
    and defaulting its 'name' to the VPC ID when no name is already set.

    :param vpc_info:  Dictionary mapping VPC IDs to VPC configuration dicts
    :param vpc_id:    ID of the VPC to create or update
    """
    # setdefault covers both "create entry" and "keep an existing name"
    vpc_info.setdefault(vpc_id, {})
    vpc_info[vpc_id]['id'] = vpc_id
    vpc_info[vpc_id].setdefault('name', vpc_id)
def get_lb_attack_surface(self, current_config, path, current_path, elb_id, callback_args):
    """
    Register an internet-reachable load balancer's public DNS name in the
    service's 'external_attack_surface' dictionary, mapping each listener to
    the protocols, ports and source CIDRs that can reach it.

    :param self:            Provider object exposing self.services and
                            self._security_group_to_attack_surface
    :param current_config:  Load balancer configuration (DNSName, listeners, ...)
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the LB; current_path[1] is the service ('elb'/'elbv2')
    :param elb_id:          Unused here; part of the generic callback signature
    :param callback_args:   Unused here; part of the generic callback signature
    """
    public_dns = current_config['DNSName']
    elb_config = self.services[current_path[1]]
    manage_dictionary(elb_config, 'external_attack_surface', {})
    if current_path[1] == 'elbv2' and current_config['Type'] == 'network':
        # Network LBs do not have a security group, lookup listeners instead;
        # listener ports are reachable from anywhere (0.0.0.0/0)
        manage_dictionary(elb_config['external_attack_surface'], public_dns, {'protocols': {}})
        attack_surface = elb_config['external_attack_surface'][public_dns]
        for listener in current_config['listeners']:
            protocol = current_config['listeners'][listener]['Protocol']
            manage_dictionary(attack_surface['protocols'], protocol, {'ports': {}})
            manage_dictionary(attack_surface['protocols'][protocol]['ports'], listener, {'cidrs': []})
            attack_surface['protocols'][protocol]['ports'][listener]['cidrs'].append({'CIDR': '0.0.0.0/0'})
    elif current_path[1] == 'elbv2' and current_config['Scheme'] == 'internet-facing':
        # Application LBs: reachability is derived from attached security groups
        elb_config['external_attack_surface'][public_dns] = {'protocols': {}}
        security_groups = [g['GroupId'] for g in current_config['security_groups']]
        listeners = list(current_config['listeners'])
        self._security_group_to_attack_surface(
            elb_config['external_attack_surface'], public_dns, current_path,
            security_groups, listeners)
    elif current_config['Scheme'] == 'internet-facing':
        # Classic ELbs do not have a security group, lookup listeners instead
        manage_dictionary(elb_config['external_attack_surface'], public_dns,
                          {'protocols': {'TCP': {'ports': {}}}})
        ports = elb_config['external_attack_surface'][public_dns]['protocols']['TCP']['ports']
        for listener in current_config['listeners']:
            manage_dictionary(ports, listener, {'cidrs': []})
            ports[listener]['cidrs'].append({'CIDR': '0.0.0.0/0'})
def process_metadata_callbacks(aws_config):
    """
    Iterates through each type of resource and, when callbacks have been
    configured in the config metadata, recurse through each resource and calls
    each callback.

    :param aws_config:  The entire AWS configuration object

    :return:            None
    """
    for service_group in aws_config['metadata']:
        for service in aws_config['metadata'][service_group]:
            if service == 'summaries':
                continue
            # Reset external attack surface
            if 'summaries' in aws_config['metadata'][service_group][service]:
                for summary in aws_config['metadata'][service_group][service]['summaries']:
                    if summary == 'external attack surface' and service in aws_config['services'] and 'external_attack_surface' in aws_config['services'][service]:
                        aws_config['services'][service].pop('external_attack_surface')
            # Reset all global summaries
            if 'service_groups' in aws_config:
                aws_config.pop('service_groups')
            # Resources: run each configured callback against the resource type's path
            for resource_type in aws_config['metadata'][service_group][service]['resources']:
                if 'callbacks' in aws_config['metadata'][service_group][service]['resources'][resource_type]:
                    current_path = ['services', service]
                    # Strip the '.id' placeholders and the leading 'services.<service>' segments
                    target_path = aws_config['metadata'][service_group][service]['resources'][resource_type]['path'].replace('.id', '').split('.')[2:]
                    callbacks = aws_config['metadata'][service_group][service]['resources'][resource_type]['callbacks']
                    new_go_to_and_do(aws_config, get_object_at(aws_config, current_path), target_path, current_path, callbacks)
            # Summaries: each callback carries its own 'path' argument
            if 'summaries' in aws_config['metadata'][service_group][service]:
                for summary in aws_config['metadata'][service_group][service]['summaries']:
                    if 'callbacks' in aws_config['metadata'][service_group][service]['summaries'][summary]:
                        current_path = ['services', service]
                        for callback in aws_config['metadata'][service_group][service]['summaries'][summary]['callbacks']:
                            callback_name = callback[0]
                            # Deep copy so pop('path') does not mutate the metadata
                            callback_args = copy.deepcopy(callback[1])
                            target_path = callback_args.pop('path').replace('.id', '').split('.')[2:]
                            callbacks = [[callback_name, callback_args]]
                            new_go_to_and_do(aws_config, get_object_at(aws_config, current_path), target_path, current_path, callbacks)
    # Group-level summaries
    for service_group in aws_config['metadata']:
        if 'summaries' in aws_config['metadata'][service_group]:
            for summary in aws_config['metadata'][service_group]['summaries']:
                # NOTE(review): 'service' here is left over from the loops above,
                # so current_path refers to the last service visited -- confirm intended
                current_path = ['services', service]
                for callback in aws_config['metadata'][service_group]['summaries'][summary]['callbacks']:
                    callback_name = callback[0]
                    callback_args = copy.deepcopy(callback[1])
                    target_path = aws_config['metadata'][service_group]['summaries'][summary]['path'].split('.')
                    # Create the target summary node, building intermediate dicts as needed
                    target_object = aws_config
                    for p in target_path:
                        manage_dictionary(target_object, p, {})
                        target_object = target_object[p]
                    if callback_name == 'merge':
                        # Merge every per-service summary of the same name into the group summary
                        for service in aws_config['metadata'][service_group]:
                            if service == 'summaries':
                                continue
                            if 'summaries' in aws_config['metadata'][service_group][service] and summary in aws_config['metadata'][service_group][service]['summaries']:
                                try:
                                    source = get_object_at(aws_config, aws_config['metadata'][service_group][service]['summaries'][summary]['path'].split('.'))
                                except:
                                    source = {}
                                target_object.update(source)
def run(self, cloud_provider, skip_dashboard=False):
    """
    Evaluate every enabled rule of this ruleset against the cloud provider's
    fetched configuration, storing results (items, counts, metadata) under
    each service's rule_type dictionary.

    :param cloud_provider:  Provider object exposing .services and .service_list
    :param skip_dashboard:  When True, skip computing dashboard counters/metadata
    """
    # Clean up existing findings
    for service in cloud_provider.services:
        cloud_provider.services[service][self.ruleset.rule_type] = {}
    # Process each rule
    for finding_path in self._filter_rules(self.rules, cloud_provider.service_list):
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
            finding_path = rule.path
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(cloud_provider.services[service], self.ruleset.rule_type, {})
            # Record rule metadata alongside the evaluation results
            cloud_provider.services[service][self.ruleset.rule_type][rule.key] = {}
            cloud_provider.services[service][self.ruleset.rule_type][rule.key]['description'] = rule.description
            cloud_provider.services[service][self.ruleset.rule_type][rule.key]['path'] = rule.path
            # Optional attributes are copied only when present on the rule
            for attr in ['level', 'id_suffix', 'display_path']:
                if hasattr(rule, attr):
                    cloud_provider.services[service][self.ruleset.rule_type][rule.key][attr] = getattr(rule, attr)
            try:
                # recurse() increments rule.checked_items as it walks the config
                setattr(rule, 'checked_items', 0)
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['items'] = recurse(
                    cloud_provider.services, cloud_provider.services, path, [], rule, True)
                if skip_dashboard:
                    continue
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['dashboard_name'] = rule.dashboard_name
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['checked_items'] = rule.checked_items
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['flagged_items'] = len(
                    cloud_provider.services[service][self.ruleset.rule_type][rule.key]['items'])
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['service'] = rule.service
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['rationale'] = rule.rationale if hasattr(
                    rule, 'rationale') else 'No description available.'
            except Exception as e:
                printException(e)
                printError('Failed to process rule defined in %s' % rule.filename)
                # Fallback if process rule failed to ensure report creation and data dump still happen
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['checked_items'] = 0
                cloud_provider.services[service][self.ruleset.rule_type][rule.key]['flagged_items'] = 0
def match_security_groups_and_resources_callback(aws_config, current_config, path, current_path, resource_id, callback_args):
    """
    Record, on each EC2 security group, which resources (instances, ELBs, ...)
    reference it, optionally grouped by resource status.

    :param aws_config:      The entire AWS configuration object
    :param current_config:  Unused here; part of the generic callback signature
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the resource collection being processed
    :param resource_id:     ID of the resource referencing security groups
    :param callback_args:   Options: 'sg_list_attribute_name' (required),
                            'sg_id_attribute_name', 'resource_id_path', 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path), [ resource_id ])
    resource = get_object_at(aws_config, original_resource_path)
    if not 'resource_id_path' in callback_args:
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # The real resource lives at a sub-path of the current object
        resource_path = combine_paths(copy.deepcopy(current_path), callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    if 'status_path' in callback_args:
        status_path = combine_paths(copy.deepcopy(original_resource_path), callback_args['status_path'])
        # Dots would break path-based lookups downstream, replace them
        resource_status = get_object_at(aws_config, status_path).replace('.', '_')
    else:
        resource_status = None
    # Services whose path does not include 'vpcs' don't know the VPC of the SG
    unknown_vpc_id = True if current_path[4] != 'vpcs' else False
    # Issue 89 & 91 : can instances have no security group?
    try:
        try:
            sg_attribute = get_object_at(resource, callback_args['sg_list_attribute_name'])
        except:
            # Resource carries no security group attribute at all
            return
        if type(sg_attribute) != list:
            sg_attribute = [ sg_attribute ]
        for resource_sg in sg_attribute:
            # Entries are either dicts carrying the SG ID or plain SG IDs
            if type(resource_sg) == dict:
                sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            else:
                sg_id = resource_sg
            if unknown_vpc_id:
                # Resolve the SG's VPC through the module-level sg_map
                # (presumably populated elsewhere in this module -- confirm)
                vpc_id = sg_map[sg_id]['vpc_id']
                sg_base_path = copy.deepcopy(current_path[0:4])
                sg_base_path[1] = 'ec2'
                sg_base_path = sg_base_path + [ 'vpcs', vpc_id, 'security_groups' ]
            else:
                sg_base_path = copy.deepcopy(current_path[0:6])
                sg_base_path[1] = 'ec2'
                sg_base_path.append('security_groups')
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            manage_dictionary(sg['used_by'][service]['resource_type'], resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(sg['used_by'][service]['resource_type'][resource_type], resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(resource_id)
    except Exception as e:
        region = current_path[3]
        vpc_id = current_path[5]
        # EC2-Classic ELBs have no VPC security groups; ignore those
        # (ec2_classic is presumably a module-level constant -- confirm)
        if vpc_id == ec2_classic and resource_type == 'elbs':
            pass
        else:
            printError('Failed to parse %s in %s in %s' % (resource_type, vpc_id, region))
            printException(e)
def match_security_groups_and_resources_callback(aws_config, current_config, path, current_path, resource_id, callback_args):
    """
    Record, on each EC2 security group, which resources reference it,
    optionally grouped by resource status.

    :param aws_config:      The entire AWS configuration object
    :param current_config:  Unused here; part of the generic callback signature
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the resource collection being processed
    :param resource_id:     ID of the resource referencing security groups
    :param callback_args:   Options: 'sg_list_attribute_name' and
                            'sg_id_attribute_name' (required),
                            'resource_id_path', 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path), [resource_id])
    resource = get_object_at(aws_config, original_resource_path)
    if not 'resource_id_path' in callback_args:
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # The real resource lives at a sub-path of the current object
        resource_path = combine_paths(copy.deepcopy(current_path), callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    if 'status_path' in callback_args:
        status_path = combine_paths(copy.deepcopy(original_resource_path), callback_args['status_path'])
        resource_status = get_object_at(aws_config, status_path)
    else:
        resource_status = None
    # Security groups always live under services.ec2....security_groups
    sg_base_path = copy.deepcopy(current_path[0:6])
    sg_base_path[1] = 'ec2'
    sg_base_path.append('security_groups')
    # Issue 89 & 91 : can instances have no security group?
    try:
        for resource_sg in resource[callback_args['sg_list_attribute_name']]:
            sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            manage_dictionary(sg['used_by'][service]['resource_type'], resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(
                    sg['used_by'][service]['resource_type'][resource_type], resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(resource_id)
    except Exception as e:
        region = current_path[3]
        vpc_id = current_path[5]
        # EC2-Classic ELBs have no VPC security groups; ignore those
        # (ec2_classic is presumably a module-level constant -- confirm)
        if vpc_id == ec2_classic and resource_type == 'elbs':
            pass
        else:
            printError('Failed to parse %s in %s in %s' % (resource_type, vpc_id, region))
            printException(e)
def analyze(self, aws_config):
    """
    Evaluate every rule of this ruleset against the fetched AWS configuration,
    storing per-rule results (items, counts, metadata) under each service's
    rule_type dictionary in aws_config.

    :param aws_config:  The entire AWS configuration object
    """
    printInfo('Analyzing AWS config...')
    # TODO: reset violations for all services in scope (maybe this can be done somewhere else (e.g. loading)
    for finding_path in self.rules:
        for rule in self.rules[finding_path]:
            printDebug('Processing %s rule[%s]: "%s"' % (finding_path.split('.')[0], self.rule_type[:-1], self.rules[finding_path][rule]['description']))
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(aws_config['services'][service], self.rule_type, {})
            # Record rule metadata alongside the evaluation results
            aws_config['services'][service][self.rule_type][rule] = {}
            aws_config['services'][service][self.rule_type][rule]['description'] = self.rules[finding_path][rule]['description']
            aws_config['services'][service][self.rule_type][rule]['path'] = self.rules[finding_path][rule]['path']
            # Only findings (as opposed to e.g. filters) carry a severity level
            if self.rule_type == 'findings':
                aws_config['services'][service][self.rule_type][rule]['level'] = self.rules[finding_path][rule]['level']
            if 'id_suffix' in self.rules[finding_path][rule]:
                aws_config['services'][service][self.rule_type][rule]['id_suffix'] = self.rules[finding_path][rule]['id_suffix']
            if 'display_path' in self.rules[finding_path][rule]:
                aws_config['services'][service][self.rule_type][rule]['display_path'] = self.rules[finding_path][rule]['display_path']
            try:
                # recurse() walks the config and returns the paths of flagged items
                aws_config['services'][service][self.rule_type][rule]['items'] = recurse(
                    aws_config['services'], aws_config['services'], path, [], self.rules[finding_path][rule], True)
                aws_config['services'][service][self.rule_type][rule]['dashboard_name'] = self.rules[finding_path][rule]['dashboard_name'] if 'dashboard_name' in self.rules[finding_path][rule] else '??'
                aws_config['services'][service][self.rule_type][rule]['checked_items'] = self.rules[finding_path][rule]['checked_items'] if 'checked_items' in self.rules[finding_path][rule] else 0
                aws_config['services'][service][self.rule_type][rule]['flagged_items'] = len(
                    aws_config['services'][service][self.rule_type][rule]['items'])
                aws_config['services'][service][self.rule_type][rule]['service'] = service
                aws_config['services'][service][self.rule_type][rule]['rationale'] = self.rules[finding_path][rule]['rationale'] if 'rationale' in self.rules[finding_path][rule] else 'N/A'
            except Exception as e:
                printError('Failed to process rule defined in %s.json' % rule)
                # Fallback if process rule failed to ensure report creation and data dump still happen
                aws_config['services'][service][self.rule_type][rule]['checked_items'] = 0
                aws_config['services'][service][self.rule_type][rule]['flagged_items'] = 0
                printException(e)
def recurse(all_info, current_info, target_path, current_path, config, add_suffix=False):
    """
    Recursively walk the configuration tree along target_path and evaluate
    the rule's conditions on every leaf object reached, returning the
    dot-joined paths of flagged items.

    :param all_info:      The full configuration (for cross-references in conditions)
    :param current_info:  The sub-tree currently being walked
    :param target_path:   Remaining path segments to descend ('id' expands to all keys)
    :param current_path:  Path segments walked so far
    :param config:        Rule configuration; 'conditions' is read,
                          'checked_items'/'flagged_items' are updated in place
    :param add_suffix:    When True, append config['id_suffix'] to flagged paths
    :return:              List of dot-joined paths of flagged items
    """
    results = []
    if len(target_path) == 0:
        # Dashboard: count the number of processed resources here
        manage_dictionary(config, 'checked_items', 0)
        config['checked_items'] = config['checked_items'] + 1
        # Test for conditions...
        if pass_conditions(all_info, current_path, copy.deepcopy(config['conditions'])):
            if add_suffix and 'id_suffix' in config:
                current_path.append(config['id_suffix'])
            results.append('.'.join(current_path))
        # Return the flagged items...
        config['flagged_items'] = len(results)
        return results
    # Copy before popping so sibling branches see an unmodified path
    target_path = copy.deepcopy(target_path)
    current_path = copy.deepcopy(current_path)
    attribute = target_path.pop(0)
    if type(current_info) == dict:
        if attribute in current_info:
            split_path = copy.deepcopy(current_path)
            split_path.append(attribute)
            results = results + recurse(all_info, current_info[attribute], target_path, split_path, config, add_suffix)
        elif attribute == 'id':
            # 'id' is a wildcard: recurse into every entry of the dictionary
            for key in current_info:
                split_target_path = copy.deepcopy(target_path)
                split_current_path = copy.deepcopy(current_path)
                split_current_path.append(key)
                split_current_info = current_info[key]
                results = results + recurse(
                    all_info, split_current_info, split_target_path, split_current_path, config, add_suffix)
    # To handle lists properly, I would have to make sure the list is properly
    # ordered and I can use the index to consistently access an object...
    # Investigate (or do not use lists)
    elif type(current_info) == list:
        for index, split_current_info in enumerate(current_info):
            split_current_path = copy.deepcopy(current_path)
            split_current_path.append(str(index))
            results = results + recurse(all_info, split_current_info, copy.deepcopy(target_path), split_current_path, config, add_suffix)
    else:
        printError('Error: unhandled case, typeof(current_info) = %s' % type(current_info))
        printError(str(current_info))
        raise Exception
    return results
def sort_elbs_callback(aws_config, current_config, path, current_path, elb_id, callback_args):
    """
    Attach an ELB configuration to its owning EC2 VPC node under 'elbs'.

    :param aws_config:      The entire AWS configuration object
    :param current_config:  The ELB configuration to attach
    :param path:            Unused here; part of the generic callback signature
    :param current_path:    Path of the ELB; the parent node is its VPC
    :param elb_id:          ID under which the ELB is stored
    :param callback_args:   Unused here; part of the generic callback signature
    """
    target_path = ['services', 'ec2'] + current_path[:-1]
    vpc_node = get_object_at(aws_config, target_path)
    manage_dictionary(vpc_node, 'elbs', {})
    vpc_node['elbs'][elb_id] = current_config
def main():
    """
    Download and decompress CloudTrail log files from S3 for a date range,
    across all configured regions.
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.add_argument('bucket-name')
    parser.parser.add_argument('--aws-account-id',
                               dest='aws_account_id',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--from',
                               dest='from_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--to',
                               dest='to_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    profile_name = args.profile[0]
    try:
        from_date = datetime.datetime.strptime(args.from_date[0], "%Y/%m/%d").date()
        to_date = datetime.datetime.strptime(args.to_date[0], "%Y/%m/%d").date()
        delta = to_date - from_date
    except Exception as e:
        printException(e)
        printError('Error: dates must be formatted of the following format YYYY/MM/DD')
        return 42
    if delta.days < 0:
        printError('Error: your \'to\' date is earlier than your \'from\' date')
        return 42

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Fetch AWS account ID
    if not args.aws_account_id[0]:
        printInfo('Fetching the AWS account ID...')
        aws_account_id = get_aws_account_id(credentials)
    else:
        aws_account_id = args.aws_account_id[0]
    global cloudtrail_log_path
    cloudtrail_log_path = cloudtrail_log_path.replace('AWS_ACCOUNT_ID', aws_account_id)

    # Create download dir
    if not os.path.exists(download_folder):
        os.makedirs(download_folder)

    # Iterate through regions
    s3_clients = {}
    for region in build_region_list('cloudtrail', args.regions, args.partition_name):
        # Connect to CloudTrail
        cloudtrail_client = connect_service('cloudtrail', credentials, region)
        if not cloudtrail_client:
            continue
        # Get information about the S3 bucket that receives CloudTrail logs
        trails = cloudtrail_client.describe_trails()
        for trail in trails['trailList']:
            bucket_name = trail['S3BucketName']
            prefix = trail['S3KeyPrefix'] if 'S3KeyPrefix' in trail else ''
            # Connect to S3, using a client in the bucket's own region
            # (S3 clients are cached per region in s3_clients)
            manage_dictionary(s3_clients, region, connect_service('s3', credentials, region))
            target_bucket_region = get_s3_bucket_location(s3_clients[region], bucket_name)
            manage_dictionary(s3_clients, target_bucket_region, connect_service('s3', credentials, target_bucket_region))
            s3_client = s3_clients[target_bucket_region]
            # Generate base path for files
            log_path = os.path.join(prefix, cloudtrail_log_path.replace('REGION', region))
            # Download files: list one day folder at a time over the date range
            printInfo('Downloading log files in %s... ' % region, False)
            keys = []
            for i in range(delta.days + 1):
                day = from_date + timedelta(days=i)
                folder_path = os.path.join(log_path, day.strftime("%Y/%m/%d"))
                try:
                    objects = handle_truncated_response(s3_client.list_objects, {'Bucket': bucket_name, 'Prefix': folder_path}, ['Contents'])
                    for o in objects['Contents']:
                        keys.append([o['Key'], 0])
                except Exception as e:
                    # Missing day folders are expected; log and continue
                    printException(e)
                    pass
            thread_work(keys, download_object, params = {'Bucket': bucket_name, 'S3Client': s3_client}, num_threads = 100)
            printInfo('Done')
    # Iterate through files and gunzip 'em
    printInfo('Decompressing files...')
    gzlogs = []
    for root, dirnames, filenames in os.walk(download_folder):
        for filename in filenames:
            gzlogs.append(filename)
    thread_work(gzlogs, gunzip_file, num_threads = 30)