def list_instances_in_security_groups(region_info):
    """
    Once all the data has been fetched, iterate through instances and record
    each one under the security groups it belongs to, bucketed by state.

    Could this be done when all the "used_by" values are set ??? TODO

    :param region_info: Region dictionary holding 'vpcs' -> instances / security_groups
    :return: None (region_info is updated in place)
    """
    for vpc in region_info['vpcs']:
        # Bug fix: a VPC without instances must not abort the whole function
        # (the original `return` skipped every remaining VPC); skip just this one.
        if 'instances' not in region_info['vpcs'][vpc]:
            continue
        for instance in region_info['vpcs'][vpc]['instances']:
            state = region_info['vpcs'][vpc]['instances'][instance]['State']['Name']
            for sg in region_info['vpcs'][vpc]['instances'][instance]['security_groups']:
                sg_id = sg['GroupId']
                security_group = region_info['vpcs'][vpc]['security_groups'][sg_id]
                manage_dictionary(security_group, 'instances', {})
                manage_dictionary(security_group['instances'], state, [])
                security_group['instances'][state].append(instance)
def parse_snapshot(self, global_params, region, dbs):
    """
    Parse a single RDS snapshot.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param dbs: Snapshot
    :return:
    """
    vpc_id = dbs.get('VpcId', ec2_classic)
    snapshot_id = dbs.pop('DBSnapshotIdentifier')
    snapshot = {
        'arn': dbs.pop('DBSnapshotArn'),
        'id': snapshot_id,
        'name': snapshot_id,
        'vpc_id': vpc_id,
    }
    # Copy optional metadata, defaulting to None when absent
    for attribute in ('DBInstanceIdentifier', 'SnapshotCreateTime', 'Encrypted', 'OptionGroupName'):
        snapshot[attribute] = dbs.get(attribute)
    # Fetch the snapshot's sharing attributes via the RDS API
    api_client = api_clients[region]
    result = api_client.describe_db_snapshot_attributes(
        DBSnapshotIdentifier=snapshot_id)['DBSnapshotAttributesResult']
    snapshot['attributes'] = result.get('DBSnapshotAttributes', {})
    # Save
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].snapshots[snapshot_id] = snapshot
def parse_security_group(self, global_params, region, group):
    """
    Parse a single Redshift security group.

    :param group: Security group dictionary from the AWS API
    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    """
    # EC2-Classic resources carry no (or an empty) VPC ID
    vpc_id = group['VpcId'] if group.get('VpcId') else ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    ingress_protocols, ingress_count = self.__parse_security_group_rules(group['IpPermissions'])
    egress_protocols, egress_count = self.__parse_security_group_rules(group['IpPermissionsEgress'])
    security_group = {
        'name': group['GroupName'],
        'id': group['GroupId'],
        'description': group['Description'],
        'owner_id': group['OwnerId'],
        'rules': {
            'ingress': {'protocols': ingress_protocols, 'count': ingress_count},
            'egress': {'protocols': egress_protocols, 'count': egress_count},
        },
    }
    self.vpcs[vpc_id].security_groups[group['GroupId']] = security_group
def parse_instance(self, global_params, region, reservation):
    """
    Parse a single EC2 reservation and store each of its instances.

    :param reservation: Reservation dictionary (contains 'Instances')
    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    """
    for raw in reservation['Instances']:
        # EC2-Classic instances carry no (or an empty) VPC ID
        vpc_id = raw['VpcId'] if raw.get('VpcId') else ec2_classic
        manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
        instance = {
            'reservation_id': reservation['ReservationId'],
            'id': raw['InstanceId'],
            'monitoring_enabled': raw['Monitoring']['State'] == 'enabled',
        }
        instance['user_data'] = self._get_user_data(region, instance['id'])
        get_name(raw, instance, 'InstanceId')
        get_keys(raw, instance, [
            'KeyName', 'LaunchTime', 'InstanceType', 'State',
            'IamInstanceProfile', 'SubnetId'
        ])
        # Network interfaces & security groups
        manage_dictionary(instance, 'network_interfaces', {})
        for eni in raw['NetworkInterfaces']:
            nic = {}
            get_keys(eni, nic, [
                'Association', 'Groups', 'PrivateIpAddresses', 'SubnetId',
                'Ipv6Addresses'
            ])
            instance['network_interfaces'][eni['NetworkInterfaceId']] = nic
        self.vpcs[vpc_id].instances[raw['InstanceId']] = instance
def get_s3_buckets(api_client, s3_info, s3_params):
    """
    List all available buckets.

    :param api_client: Dictionary of S3 API clients, keyed by region
    :param s3_info: S3 service configuration (updated in place)
    :param s3_params: Parameters (selected regions, skipped/checked buckets, ...)
    :return: The updated s3_info dictionary
    """
    manage_dictionary(s3_info, 'buckets', {})
    buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets']
    targets = []
    for b in buckets:
        # Abort if bucket is not of interest
        if (b['Name'] in s3_params['skipped_buckets']) or \
                (len(s3_params['checked_buckets']) and b['Name'] not in s3_params['checked_buckets']):
            continue
        targets.append(b)
    s3_params['api_clients'] = api_client
    s3_params['s3_info'] = s3_info
    # FIXME - commented for now as this method doesn't seem to be defined anywhere
    # thread_work(targets, get_s3_bucket, params = s3_params, num_threads = 30)
    # show_status(s3_info)
    # Fix: 'buckets_count' used to be assigned twice; the earlier
    # `len(targets)` assignment was dead code, immediately overwritten here.
    s3_info['buckets_count'] = len(s3_info['buckets'])
    return s3_info
def get_s3_acls(api_client, bucket_name, bucket, key_name=None):
    """
    Fetch and normalize the ACL grants for a bucket (or a single object).

    :param api_client: S3 API client
    :param bucket_name: Name of the bucket
    :param bucket: Bucket dictionary (kept for interface compatibility)
    :param key_name: Optional object key; when set, the object's ACL is fetched
    :return: Dictionary of grantees with display names and permissions,
             or an empty dictionary on failure
    """
    try:
        if key_name:
            grants = api_client.get_object_acl(Bucket=bucket_name, Key=key_name)
        else:
            grants = api_client.get_bucket_acl(Bucket=bucket_name)
        grantees = {}
        for grant in grants['Grants']:
            grantee_info = grant['Grantee']
            if 'ID' in grantee_info:
                grantee = grantee_info['ID']
                display_name = grantee_info.get('DisplayName', grantee_info['ID'])
            elif 'URI' in grantee_info:
                # Predefined group grant: identify it by the last URI component
                grantee = grantee_info['URI'].split('/')[-1]
                display_name = s3_group_to_string(grantee_info['URI'])
            else:
                grantee = display_name = 'Unknown'
            manage_dictionary(grantees, grantee, {})
            grantees[grantee]['DisplayName'] = display_name
            if 'URI' in grantee_info:
                grantees[grantee]['URI'] = grantee_info['URI']
            manage_dictionary(grantees[grantee], 'permissions', init_s3_permissions())
            set_s3_permissions(grantees[grantee]['permissions'], grant['Permission'])
        return grantees
    except Exception as e:
        print_error('Failed to get ACL configuration for %s: %s' % (bucket_name, e))
        return {}
def _match_instances_and_roles(self):
    """
    Map EC2 instances to IAM roles via their instance profiles.

    Robustness fix: bail out when the EC2 or IAM service was not included in
    the run (the guarded variant of this matcher already does this) instead
    of raising a KeyError on the missing service entry.
    """
    print_info('Matching EC2 instances and IAM roles')
    if not (self.services.get('ec2') and self.services.get('iam')):
        return
    ec2_config = self.services['ec2']
    iam_config = self.services['iam']
    # Build a map of instance profile ID -> list of instance IDs
    role_instances = {}
    for r in ec2_config['regions']:
        for v in ec2_config['regions'][r]['vpcs']:
            if 'instances' in ec2_config['regions'][r]['vpcs'][v]:
                for i in ec2_config['regions'][r]['vpcs'][v]['instances']:
                    instance_profile = ec2_config['regions'][r]['vpcs'][v][
                        'instances'][i]['IamInstanceProfile']
                    instance_profile_id = instance_profile['Id'] if instance_profile else None
                    if instance_profile_id:
                        manage_dictionary(role_instances, instance_profile_id, [])
                        role_instances[instance_profile_id].append(i)
    # Attach the matched instances to each role's instance profiles
    for role_id in iam_config['roles']:
        iam_config['roles'][role_id]['instances_count'] = 0
        for instance_profile_id in iam_config['roles'][role_id]['instance_profiles']:
            if instance_profile_id in role_instances:
                iam_config['roles'][role_id]['instance_profiles'][instance_profile_id]['instances'] = \
                    role_instances[instance_profile_id]
                iam_config['roles'][role_id]['instances_count'] += len(
                    role_instances[instance_profile_id])
def _match_instances_and_roles(self):
    """Map EC2 instances to IAM roles via their instance profiles."""
    # Both services must have been included in the run
    if self.services.get('ec2') and self.services.get('iam'):
        ec2_config = self.services['ec2']
        iam_config = self.services['iam']
        # Build a map of instance profile ID -> list of instance IDs
        role_instances = {}
        for region in ec2_config['regions'].values():
            for vpc in region['vpcs'].values():
                if 'instances' not in vpc:
                    continue
                for instance_id, instance in vpc['instances'].items():
                    profile = instance['IamInstanceProfile']
                    profile_id = profile['Id'] if profile else None
                    if profile_id:
                        manage_dictionary(role_instances, profile_id, [])
                        role_instances[profile_id].append(instance_id)
        # Attach the matched instances to each role's instance profiles
        for role in iam_config['roles'].values():
            role['instances_count'] = 0
            for profile_id in role['instance_profiles']:
                if profile_id in role_instances:
                    role['instance_profiles'][profile_id]['instances'] = role_instances[profile_id]
                    role['instances_count'] += len(role_instances[profile_id])
def _match_instances_and_vpcs(self):
    """Attach each EC2 instance ID to the 'instances' list of its VPC."""
    # Fetch all EC2 instances, restricted to the fields needed here
    ec2_instances = self._get_ec2_instances_details(['id', 'vpc', 'region'])
    for details in ec2_instances.values():
        # Locate the VPC entry this instance belongs to
        vpc = self.services['vpc']['regions'][details['region']]['vpcs'][details['vpc']]
        manage_dictionary(vpc, 'instances', [])
        if details['id'] not in vpc['instances']:
            vpc['instances'].append(details['id'])
def get_db_attack_surface(self, current_config, path, current_path, db_id, callback_args):
    """
    Record the external attack surface of a publicly reachable database.

    :param current_config: Configuration dictionary of the database resource
    :param path: Path template (unused here)
    :param current_path: Path of the resource in the services tree
    :param db_id: Identifier of the database
    :param callback_args: Callback arguments (unused here)
    """
    service = current_path[1]
    service_config = self.services[service]
    manage_dictionary(service_config, 'external_attack_surface', {})
    if service in ('redshift', 'rds') and current_config.get('PubliclyAccessible'):
        endpoint = current_config['Endpoint']
        self._security_group_to_attack_surface(
            service_config['external_attack_surface'], endpoint['Address'],
            current_path,
            [g['VpcSecurityGroupId'] for g in current_config['VpcSecurityGroups']],
            [endpoint['Port']])
    elif 'ConfigurationEndpoint' in current_config:
        # TODO : get the proper address
        endpoint = current_config['ConfigurationEndpoint']
        public_dns = endpoint['Address'].replace('.cfg', '')
        self._security_group_to_attack_surface(
            service_config['external_attack_surface'], public_dns,
            current_path,
            [g['SecurityGroupId'] for g in current_config['SecurityGroups']],
            [endpoint['Port']])
def process_vpc_peering_connections_callback(self, current_config, path, current_path, pc_id, callback_args):
    """
    Record this peering connection ID in the VPC owned by this account and
    attach a summary of the peer VPC under 'peer_info'.
    """
    # Create a list of peering connection IDs in each VPC: pick whichever
    # side of the connection belongs to this account
    local_side = 'AccepterVpcInfo' if current_config['AccepterVpcInfo'][
        'OwnerId'] == self.account_id else 'RequesterVpcInfo'
    region = current_path[current_path.index('regions') + 1]
    vpc_id = current_config[local_side]['VpcId']
    if vpc_id not in self.services['vpc']['regions'][region]['vpcs']:
        # Handle edge case where the region wasn't included in the execution
        region = current_config['AccepterVpcInfo']['Region']
    if region in self.services['vpc']['regions']:
        target = self.services['vpc']['regions'][region]['vpcs'][vpc_id]
        manage_dictionary(target, 'peering_connections', [])
        if pc_id not in target['peering_connections']:
            target['peering_connections'].append(pc_id)
    # VPC information for the peer'd VPC
    peer_side = 'RequesterVpcInfo' if local_side == 'AccepterVpcInfo' else 'AccepterVpcInfo'
    peer_info = copy.deepcopy(current_config[peer_side])
    peer_info.pop('PeeringOptions', None)
    # Resolve the peer account's name through the organization, when known
    if hasattr(self, 'organization') and peer_info['OwnerId'] in self.organization:
        peer_info['name'] = self.organization[peer_info['OwnerId']]['Name']
    else:
        peer_info['name'] = peer_info['OwnerId']
    current_config['peer_info'] = peer_info
def parse_instance(self, global_params, region, dbi):
    """
    Parse a single RDS instance.

    :param dbi: Instance dictionary from the AWS API
    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    """
    vpc_id = dbi['DBSubnetGroup']['VpcId'] if 'DBSubnetGroup' in dbi and 'VpcId' in dbi['DBSubnetGroup'] and \
        dbi['DBSubnetGroup']['VpcId'] else ec2_classic
    instance = {'name': dbi.pop('DBInstanceIdentifier')}
    # Fix: 'StorageEncrypted' was listed twice; duplicate removed.
    for key in ['InstanceCreateTime', 'Engine', 'DBInstanceStatus', 'AutoMinorVersionUpgrade',
                'DBInstanceClass', 'MultiAZ', 'Endpoint', 'BackupRetentionPeriod',
                'PubliclyAccessible', 'StorageEncrypted', 'VpcSecurityGroups',
                'DBSecurityGroups', 'DBParameterGroups', 'EnhancedMonitoringResourceArn']:
        # parameter_groups, security_groups, vpc_security_groups
        instance[key] = dbi.get(key)
    # If part of a cluster, multi AZ information is only available via cluster information
    if 'DBClusterIdentifier' in dbi:
        api_client = api_clients[region]
        cluster = api_client.describe_db_clusters(
            DBClusterIdentifier=dbi['DBClusterIdentifier'])['DBClusters'][0]
        instance['MultiAZ'] = cluster['MultiAZ']
    # Save
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].instances[instance['name']] = instance
def parse_route_table(self, global_params, region, rt):
    """
    Parse a single route table and store it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param rt: Route table dictionary from the AWS API
    """
    vpc_id = rt['VpcId']
    route_table = {}
    get_name(rt, route_table, 'VpcId')  # TODO: change get_name to have src then dst
    get_keys(rt, route_table, ['Routes', 'Associations', 'PropagatingVgws'])
    # Save under the owning VPC
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].route_tables[rt['RouteTableId']] = route_table
def _security_group_to_attack_surface(self, attack_surface_config, public_ip, current_path,
                                      security_groups, listeners=None):
    """
    Merge the ingress rules of the given security groups into the attack
    surface entry for a public IP / DNS name.

    :param attack_surface_config: Dictionary of public endpoints (updated in place)
    :param public_ip: Public IP address or DNS name of the resource
    :param current_path: Path of the resource in the services tree
    :param security_groups: List of security group IDs attached to the resource
    :param listeners: Ports the resource listens on; when empty, every
                      CIDR-open ingress port is considered exposed
    """
    listeners = [] if listeners is None else listeners
    manage_dictionary(attack_surface_config, public_ip, {'protocols': {}})
    if 'ec2' in self.service_list:  # validate that the service was included in run
        for sg_id in security_groups:
            # Build the path to the security group's ingress rules
            sg_path = copy.deepcopy(current_path[0:6])
            sg_path[1] = 'ec2'
            sg_path.append('security_groups')
            sg_path.append(sg_id)
            sg_path.append('rules')
            sg_path.append('ingress')
            ingress_rules = get_object_at(self, sg_path)
            for p in ingress_rules['protocols']:
                for port in ingress_rules['protocols'][p]['ports']:
                    if len(listeners) == 0 and 'cidrs' in ingress_rules['protocols'][p]['ports'][port]:
                        # No known listeners: expose every CIDR-open port as-is
                        manage_dictionary(
                            attack_surface_config[public_ip]['protocols'], p, {'ports': {}})
                        manage_dictionary(
                            attack_surface_config[public_ip]['protocols'][p]['ports'],
                            port, {'cidrs': []})
                        attack_surface_config[public_ip]['protocols'][p]['ports'][port]['cidrs'] += \
                            ingress_rules['protocols'][p]['ports'][port]['cidrs']
                    else:
                        # Parse the rule's port (single value or range) into numeric bounds
                        ports = port.split('-')
                        if len(ports) > 1:
                            port_min = int(ports[0])
                            port_max = int(ports[1])
                        elif port == 'N/A':
                            port_min = port_max = None
                        elif port == 'ALL':
                            port_min = 0
                            port_max = 65535
                        elif p == 'ICMP':
                            port_min = port_max = None
                        else:
                            port_min = port_max = int(port)
                        for listener in listeners:
                            # Bug fix: compare with `is not None` (port_min == 0 is falsy,
                            # which silently skipped the ALL 0-65535 range) and use
                            # inclusive bounds so single-port rules and range endpoints
                            # match their listeners.
                            if port_min is not None and port_max is not None and \
                                    port_min <= int(listener) <= port_max and \
                                    'cidrs' in ingress_rules['protocols'][p]['ports'][port]:
                                manage_dictionary(
                                    attack_surface_config[public_ip]['protocols'],
                                    p, {'ports': {}})
                                manage_dictionary(
                                    attack_surface_config[public_ip]['protocols'][p]['ports'],
                                    str(listener), {'cidrs': []})
                                attack_surface_config[public_ip]['protocols'][p]['ports'][str(listener)]['cidrs'] += \
                                    ingress_rules['protocols'][p]['ports'][port]['cidrs']
def match_instances_and_subnets_callback(self, current_config, path, current_path, instance_id, callback_args):
    """Register an EC2 instance in the 'instances' list of its subnet."""
    # Both services must have been included in the run
    if not (self.services.get('ec2') and self.services.get('vpc')):
        return
    subnet_id = current_config['SubnetId']
    if not subnet_id:
        return
    location = self.subnet_map[subnet_id]
    subnet = self.services['vpc']['regions'][location['region']]['vpcs'][
        location['vpc_id']]['subnets'][subnet_id]
    manage_dictionary(subnet, 'instances', [])
    if instance_id not in subnet['instances']:
        subnet['instances'].append(instance_id)
def match_instances_and_subnets_callback(self, current_config, path, current_path, instance_id, callback_args):
    """
    Register an EC2 instance in the 'instances' list of its subnet.

    Robustness fix: only run when both the EC2 and VPC services were included
    in the run (mirrors the guarded variant of this callback), instead of
    failing on a missing service entry.
    """
    if self.services.get('ec2') and self.services.get('vpc'):
        subnet_id = current_config['SubnetId']
        if subnet_id:
            vpc = self.subnet_map[subnet_id]
            subnet = self.services['vpc']['regions'][vpc['region']]['vpcs'][
                vpc['vpc_id']]['subnets'][subnet_id]
            manage_dictionary(subnet, 'instances', [])
            if instance_id not in subnet['instances']:
                subnet['instances'].append(instance_id)
def parse_vpc(self, global_params, region_name, vpc):
    """
    Parse a single VPC and record its display name.

    :param global_params: Parameters shared for all regions
    :param region_name: Name of the AWS region
    :param vpc: VPC dictionary from the AWS API
    :return:
    """
    vpc_id = vpc['VpcId']
    # Save; initialize the VPC entry if this is the first resource seen in it
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].name = get_name(vpc, {}, 'VpcId')
def manage_vpc(vpc_info, vpc_id):
    """
    Ensure a VPC entry exists with its 'id' and 'name' fields set.

    :param vpc_info: Dictionary of VPCs keyed by VPC ID (updated in place)
    :param vpc_id: ID of the VPC to initialize
    :return:
    """
    manage_dictionary(vpc_info, vpc_id, {})
    entry = vpc_info[vpc_id]
    entry['id'] = vpc_id
    # Fall back to the VPC ID when no name has been resolved yet
    entry.setdefault('name', vpc_id)
def _parse_volume_rules(self, rules):
    """
    Normalize a list of rules into a {snapshot_id: {'volumes': {}}} structure.

    :param rules: List of rule dictionaries, each optionally holding 'SnapshotId'
    :return: Tuple (volumes dictionary, number of rules processed)
    """
    volumes = {}
    rules_count = 0
    for rule in rules:
        # Missing snapshot IDs are grouped under the sentinel key "EMPTY"
        volume_id = rule.get("SnapshotId")
        if volume_id is None:
            volume_id = "EMPTY"
        volumes = manage_dictionary(volumes, volume_id, {})
        volumes[volume_id] = manage_dictionary(volumes[volume_id], 'volumes', {})
        rules_count += 1
    return volumes, rules_count
def _update_sg_usage_codebuild(self):
    """Flag EC2 security groups referenced by CodeBuild projects as used."""
    try:
        regions = self.services['codebuild']['regions']
        for region in regions:
            projects = regions[region]['build_projects']
            for project_name in projects:
                project = projects[project_name]
                # Only projects configured to run inside a VPC reference SGs
                if 'vpc' in project and 'security_groups' in project:
                    for sg_id in project['security_groups']:
                        sg = self.services['ec2']['regions'][region]['vpcs'][
                            project['vpc']]['security_groups'][sg_id]
                        manage_dictionary(sg, 'used_by',
                                          {'resource_type': {'codebuild_project': []}})
                        sg['used_by']['resource_type']['codebuild_project'].append({
                            'id': project['arn'],
                            'name': project['name']
                        })
    except Exception as e:
        print_exception(f'Failed to update security group usage for CodeBuild: {e}')
def store_target(self, global_params, region, target):
    """
    Store a parsed resource under its VPC (or at the service level when it
    carries no VPC ID).

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param target: Resource dictionary; must contain 'scout2_target_type'
    """
    target_type = target.pop('scout2_target_type')
    if 'VpcId' in target:
        vpc_id = target.pop('VpcId')
        manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
        container = getattr(self.vpcs[vpc_id], target_type)
    else:
        container = getattr(self, target_type)
    target_id = target[resource_id_map[target_type]]
    get_name(target, target, resource_id_map[target_type])
    container[target_id] = target
def __init__(self, ruleset):
    """
    Index the ruleset's enabled rules by the path they apply to.

    :param ruleset: Ruleset object whose .rules maps filenames to rule lists
    """
    self.ruleset = ruleset
    self.rules = {}
    for filename, file_rules in self.ruleset.rules.items():
        for rule in file_rules:
            if not rule.enabled:
                continue
            try:
                self.rules.setdefault(rule.path, []).append(rule)
            except Exception as e:
                print_exception(f'Failed to create rule {rule.filename}: {e}')
def _parse_snapshot_rules(self, rule):
    """
    Normalize a single rule into a {snapshot_id: {'volume': {volume_id: {}}}}
    structure.

    :param rule: Rule dictionary, optionally holding 'SnapshotId' and 'VolumeId'
    :return: Tuple (snapshots dictionary, number of rules processed)
    """
    snapshots = {}
    # Missing IDs are grouped under sentinel keys
    snapshot_id = rule.get("SnapshotId", "EMPTY")
    snapshots = manage_dictionary(snapshots, snapshot_id, {})
    snapshots[snapshot_id] = manage_dictionary(snapshots[snapshot_id], 'volume', {})
    volume_id = rule.get("VolumeId", "NO VOLUME")
    manage_dictionary(snapshots[snapshot_id]['volume'], volume_id, {})
    rules_count = 1  # exactly one rule is processed per call
    return snapshots, rules_count
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single Redshift cluster.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: Cluster dictionary (modified in place)
    """
    # Clusters outside a VPC are filed under EC2-Classic
    vpc_id = cluster.pop('VpcId', ec2_classic)
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    cluster_name = cluster.pop('ClusterIdentifier')
    cluster['name'] = cluster_name
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
def run(self, cloud_provider, skip_dashboard=False):
    """
    Process every enabled rule against the provider's fetched configuration.

    :param cloud_provider: Provider object holding the fetched services tree
    :param skip_dashboard: When True, skip computing dashboard metadata
    """
    # Clean up existing findings
    for service in cloud_provider.services:
        cloud_provider.services[service][self.ruleset.rule_type] = {}
    # Process each rule
    for finding_path in self._filter_rules(self.rules, cloud_provider.service_list):
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            print_debug(f'Processing {rule.service} rule "{rule.description}" ({rule.filename})')
            finding_path = rule.path
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(cloud_provider.services[service], self.ruleset.rule_type, {})
            cloud_provider.services[service][self.ruleset.rule_type][rule.key] = {}
            # Single reference to the finding entry to avoid repeating the long path
            finding = cloud_provider.services[service][self.ruleset.rule_type][rule.key]
            finding['description'] = rule.description
            finding['path'] = rule.path
            for attr in ['level', 'id_suffix', 'class_suffix', 'display_path']:
                if hasattr(rule, attr):
                    finding[attr] = getattr(rule, attr)
            try:
                setattr(rule, 'checked_items', 0)
                finding['items'] = recurse(
                    cloud_provider.services, cloud_provider.services, path, [], rule, True)
                # Bug fix: removed a leftover debug print() that dumped the
                # flagged items to stdout on every rule.
                if skip_dashboard:
                    continue
                finding['dashboard_name'] = rule.dashboard_name
                finding['checked_items'] = rule.checked_items
                finding['flagged_items'] = len(finding['items'])
                finding['service'] = rule.service
                finding['rationale'] = getattr(rule, 'rationale', None)
                finding['remediation'] = getattr(rule, 'remediation', None)
                finding['compliance'] = getattr(rule, 'compliance', None)
                finding['references'] = getattr(rule, 'references', None)
            except Exception as e:
                print_exception(f'Failed to process rule defined in {rule.filename}: {e}')
                # Fallback if process rule failed to ensure report creation and data dump still happen
                finding['checked_items'] = 0
                finding['flagged_items'] = 0
def _update_iam_permissions(self, s3_info, bucket_name, iam_entity, allowed_iam_entity, policy_info):
    """
    Attach IAM policy information granting access to an S3 bucket.

    :param s3_info: S3 service configuration holding the 'buckets' dictionary
    :param bucket_name: Bucket name, or '*' to apply to every known bucket
    :param iam_entity: IAM entity category key
    :param allowed_iam_entity: Name of the IAM entity granted access
    :param policy_info: Dictionary with 'policies' and/or 'inline_policies'
    """
    # Both services must have been included in the run
    if not (self.services.get('s3') and self.services.get('iam')):
        return
    if bucket_name == '*':
        # Wildcard grant: recurse over every known bucket
        for name in s3_info['buckets']:
            self._update_iam_permissions(s3_info, name, iam_entity,
                                         allowed_iam_entity, policy_info)
        return
    if bucket_name not in s3_info['buckets']:
        # Could be an error or cross-account access, ignore
        return
    bucket = s3_info['buckets'][bucket_name]
    manage_dictionary(bucket, iam_entity, {})
    manage_dictionary(bucket, iam_entity + '_count', 0)
    if allowed_iam_entity not in bucket[iam_entity]:
        # Count each distinct entity only once
        bucket[iam_entity][allowed_iam_entity] = {}
        bucket[iam_entity + '_count'] += 1
    for policy_type in ('inline_policies', 'policies'):
        if policy_type in policy_info:
            manage_dictionary(bucket[iam_entity][allowed_iam_entity], policy_type, {})
            bucket[iam_entity][allowed_iam_entity][policy_type].update(policy_info[policy_type])
@staticmethod
def __parse_security_group_rules(rules):
    """
    Parse a list of security group rules (IpPermissions entries).

    Fix: declared as @staticmethod — the function takes no `self` yet is
    invoked as `self.__parse_security_group_rules(...)` by
    parse_security_group(), which would otherwise pass the instance as
    `rules`.

    :param rules: List of IpPermissions / IpPermissionsEgress dictionaries
    :return: Tuple (protocols dictionary, number of grants found)
    """
    protocols = {}
    rules_count = 0
    for rule in rules:
        ip_protocol = rule['IpProtocol'].upper()
        if ip_protocol == '-1':
            ip_protocol = 'ALL'  # -1 means "all protocols" in the EC2 API
        protocols = manage_dictionary(protocols, ip_protocol, {})
        protocols[ip_protocol] = manage_dictionary(protocols[ip_protocol], 'ports', {})
        # Save the port (single port or range)
        port_value = 'N/A'
        if 'FromPort' in rule and 'ToPort' in rule:
            if ip_protocol == 'ICMP':
                # FromPort with ICMP is the type of message
                port_value = icmp_message_types_dict[str(rule['FromPort'])]
            elif rule['FromPort'] == rule['ToPort']:
                port_value = str(rule['FromPort'])
            else:
                port_value = '%s-%s' % (rule['FromPort'], rule['ToPort'])
        manage_dictionary(protocols[ip_protocol]['ports'], port_value, {})
        # Save grants, values are either a CIDR or an EC2 security group
        for grant in rule['UserIdGroupPairs']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value],
                              'security_groups', [])
            protocols[ip_protocol]['ports'][port_value]['security_groups'].append(grant)
            rules_count += 1
        for grant in rule['IpRanges']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'cidrs', [])
            protocols[ip_protocol]['ports'][port_value]['cidrs'].append(
                {'CIDR': grant['CidrIp']})
            rules_count += 1
        # IPv6
        for grant in rule['Ipv6Ranges']:
            manage_dictionary(protocols[ip_protocol]['ports'][port_value], 'cidrs', [])
            protocols[ip_protocol]['ports'][port_value]['cidrs'].append(
                {'CIDR': grant['CidrIpv6']})
            rules_count += 1
    return protocols, rules_count
def list_ec2_network_attack_surface_callback(self, current_config, path, current_path,
                                             privateip_id, callback_args):
    """
    Add an ENI's public IPv4 address and its IPv6 addresses to the EC2
    external attack surface.
    """
    ec2_config = self.services['ec2']
    manage_dictionary(ec2_config, 'external_attack_surface', {})
    # Public IPv4 (present when an address association exists)
    if current_config.get('Association'):
        self._security_group_to_attack_surface(
            ec2_config['external_attack_surface'],
            current_config['Association']['PublicIp'],
            current_path,
            [g['GroupId'] for g in current_config['Groups']], [])
    # IPv6
    for ipv6 in current_config.get('Ipv6Addresses', []):
        self._security_group_to_attack_surface(
            ec2_config['external_attack_surface'],
            ipv6['Ipv6Address'],
            current_path,
            [g['GroupId'] for g in current_config['Groups']], [])
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single EMR cluster.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: EMR cluster summary (only 'Id' is used here)
    """
    cluster_id = cluster['Id']
    # Replace the summary with the full cluster description from the API
    cluster = api_clients[region].describe_cluster(ClusterId=cluster_id)['Cluster']
    cluster['id'] = cluster.pop('Id')
    cluster['name'] = cluster.pop('Name')
    # The EMR API won't disclose the VPC ID, so wait until all configs have
    # been fetched and look up the VPC based on the subnet ID
    vpc_id = 'TODO'
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].clusters[cluster_id] = cluster
def get_security_groups_info(rds_client, region_info):
    """
    Fetch and parse the RDS (DB) security groups of a region.

    :param rds_client: RDS API client
    :param region_info: Region dictionary, updated in place
    """
    groups = rds_client.describe_db_security_groups()['DBSecurityGroups']
    # DB security groups are filed under the EC2-Classic pseudo-VPC
    manage_dictionary(region_info, 'vpcs', {})
    manage_dictionary(region_info['vpcs'], ec2_classic, {})
    manage_dictionary(region_info['vpcs'][ec2_classic], 'security_groups', {})
    manage_dictionary(region_info, 'security_groups_count', 0)
    region_info['security_groups_count'] += len(groups)
    sg_dict = region_info['vpcs'][ec2_classic]['security_groups']
    for group in groups:
        sg_dict[group['DBSecurityGroupName']] = parse_security_group(group)