Example #1
def audit_lightsail(findings, region):
    # Just check if lightsail is in use
    json_blob = query_aws(region.account, "lightsail-get-instances", region)
    if json_blob is None:
        # Service not supported in the region
        return
    if len(json_blob.get('instances', [])) > 0:
        findings.add(
            Finding(region,
                    'LIGHTSAIL_IN_USE',
                    None,
                    resource_details={
                        'instance count': len(json_blob['instances'])
                    }))

    json_blob = query_aws(region.account, "lightsail-get-load-balancers",
                          region)
    if json_blob is None:
        # Service not supported in the region
        return
    if len(json_blob.get('loadBalancers', [])) > 0:
        findings.add(
            Finding(region,
                    'LIGHTSAIL_IN_USE',
                    None,
                    resource_details={
                        'load balancer count': len(json_blob['loadBalancers'])
                    }))
Example #2
def audit_ec2(findings, region):
    json_blob = query_aws(region.account, 'ec2-describe-instances', region)
    route_table_json = query_aws(region.account, 'ec2-describe-route-tables', region)

    for reservation in json_blob.get('Reservations', []):
        for instance in reservation.get('Instances', []):
            if instance.get('State', {}).get('Name', '') == 'terminated':
                # Ignore terminated instances
                continue

            if 'vpc' not in instance.get('VpcId', ''):
                findings.add(Finding(
                    region,
                    'EC2_CLASSIC',
                    instance['InstanceId']))

            if not instance.get('SourceDestCheck', True):
                route_to_instance = None
                for table in route_table_json['RouteTables']:
                    if table['VpcId'] == instance.get('VpcId', ''):
                        for route in table['Routes']:
                            if route.get('InstanceId', '') == instance['InstanceId']:
                                route_to_instance = route
                                break
                    if route_to_instance is not None:
                        break
                findings.add(Finding(
                    region,
                    'EC2_SOURCE_DEST_CHECK_OFF',
                    instance['InstanceId'],
                    resource_details={'routes': route_to_instance}))
Example #3
def audit_ec2(region):
    json_blob = query_aws(region.account, 'ec2-describe-instances', region)
    route_table_json = query_aws(region.account, 'ec2-describe-route-tables', region)

    ec2_classic_count = 0
    for reservation in json_blob.get('Reservations', []):
        for instance in reservation.get('Instances', []):
            if instance.get('State', {}).get('Name', '') == 'terminated':
                # Ignore terminated instances
                continue

            if 'vpc' not in instance.get('VpcId', ''):
                ec2_classic_count += 1

            if not instance.get('SourceDestCheck', True):
                print('- EC2 SourceDestCheck is off: {}'.format(instance['InstanceId']))

                route_to_instance = None
                for table in route_table_json['RouteTables']:
                    if table['VpcId'] == instance.get('VpcId', ''):
                        for route in table['Routes']:
                            if route.get('InstanceId', '') == instance['InstanceId']:
                                route_to_instance = route
                                break
                    if route_to_instance is not None:
                        break

                if route_to_instance is None:
                    print('  - No routes to instance, SourceDestCheck is not doing anything')
                else:
                    print('  - Routes: {}'.format(route_to_instance))

    if ec2_classic_count != 0:
        print('- EC2 classic instances found: {}'.format(ec2_classic_count))
Example #4
def amis(args, accounts, config):
    # Loading the list of public images from disk takes a while, so we'll iterate by region

    regions_file = 'data/aws/us-east-1/ec2-describe-images.json'
    if not os.path.isfile(regions_file):
        raise Exception(
            "You need to download the set of public AMI images.  Run:\n"
            "  mkdir -p data/aws\n"
            "  cd data/aws\n"
            "  aws ec2 describe-regions | jq -r '.Regions[].RegionName' | xargs -I{} mkdir {}\n"
            "  aws ec2 describe-regions | jq -r '.Regions[].RegionName' | xargs -I{} sh -c 'aws --region {} ec2 describe-images --executable-users all > {}/ec2-describe-images.json'\n"
        )

    print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
        'Account Name', 'Region Name', 'Instance Id', 'Instance Name',
        'AMI ID', 'Is Public', 'AMI Description', 'AMI Owner'))

    for region_name in listdir('data/aws/'):
        # Get public images
        public_images_file = 'data/aws/{}/ec2-describe-images.json'.format(
            region_name)
        public_images = json.load(open(public_images_file))
        resource_filter = '.Images[]'
        public_images = pyjq.all(resource_filter, public_images)

        for account in accounts:
            account = Account(None, account)
            region = Region(account, {'RegionName': region_name})

            instances = query_aws(account, "ec2-describe-instances", region)
            resource_filter = '.Reservations[].Instances[] | select(.State.Name == "running")'
            if args.instance_filter != '':
                resource_filter += '|{}'.format(args.instance_filter)
            instances = pyjq.all(resource_filter, instances)

            account_images = query_aws(account, "ec2-describe-images", region)
            resource_filter = '.Images[]'
            account_images = pyjq.all(resource_filter, account_images)

            for instance in instances:
                image_id = instance['ImageId']
                image_description = ''
                owner = ''
                image, is_public_image = find_image(image_id, public_images,
                                                    account_images)
                if image:
                    # Many images don't have all fields, so try the Name, then Description, then ImageLocation
                    image_description = image.get('Name', '')
                    if image_description == '':
                        image_description = image.get('Description', '')
                        if image_description == '':
                            image_description = image.get('ImageLocation', '')
                    owner = image.get('OwnerId', '')

                print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
                    account.name, region.name, instance['InstanceId'],
                    get_instance_name(instance), image_id, is_public_image,
                    image_description, owner))
Example #5
def get_elbs(subnet, outputfilter):
    # ELBs
    elb_instances = query_aws(subnet.account, "elb-describe-load-balancers",
                              subnet.region)
    elb_resource_filter = '.LoadBalancerDescriptions[] | select(.VPCId == "{}") | select(.Subnets[] == "{}")'
    elbs = pyjq.all(
        elb_resource_filter.format(subnet.vpc.local_id, subnet.local_id),
        elb_instances)

    # ALBs and NLBs
    alb_instances = query_aws(subnet.account, "elbv2-describe-load-balancers",
                              subnet.region)
    alb_resource_filter = '.LoadBalancers[] | select(.VpcId == "{}") | select(.AvailabilityZones[].SubnetId == "{}")'
    albs = pyjq.all(
        alb_resource_filter.format(subnet.vpc.local_id, subnet.local_id),
        alb_instances)

    if 'tags' not in outputfilter:
        return elbs + albs

    # There are tags requested, so we need to filter these
    tag_filter = ""
    tag_set_conditions = []
    for tag_set in outputfilter.get("tags", []):
        conditions = [c.split("=") for c in tag_set.split(",")]
        condition_queries = []
        for pair in conditions:
            if len(pair) == 2:
                condition_queries.append('.{} == "{}"'.format(
                    pair[0], pair[1]))
        tag_set_conditions.append('(' + ' and '.join(condition_queries) + ')')
    tag_filter = 'select(.TagDescriptions[0].Tags | from_entries | ' + ' or '.join(
        tag_set_conditions) + ')'

    filtered_elbs = []
    for elb in elbs:
        tags = get_parameter_file(subnet.region, 'elb', 'describe-tags',
                                  elb['LoadBalancerName'])
        if tags is None:
            continue

        if pyjq.first(tag_filter, tags) is not None:
            filtered_elbs.append(elb)

    for elb in albs:
        tags = get_parameter_file(subnet.region, 'elbv2', 'describe-tags',
                                  elb['LoadBalancerArn'])
        if tags is None:
            continue

        if pyjq.first(tag_filter, tags) is not None:
            filtered_elbs.append(elb)

    return filtered_elbs
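For reference, a minimal sketch (not part of the original example) of the jq expression the tag-filtering loop above assembles; the tag spec below is an invented input:

# Invented tag spec, standing in for outputfilter = {"tags": ["Name=prod,Team=infra", "Env=dev"]}
tag_sets = ["Name=prod,Team=infra", "Env=dev"]
tag_set_conditions = []
for tag_set in tag_sets:
    pairs = [c.split("=") for c in tag_set.split(",")]
    queries = ['.{} == "{}"'.format(p[0], p[1]) for p in pairs if len(p) == 2]
    tag_set_conditions.append('(' + ' and '.join(queries) + ')')
tag_filter = ('select(.TagDescriptions[0].Tags | from_entries | '
              + ' or '.join(tag_set_conditions) + ')')
print(tag_filter)
# select(.TagDescriptions[0].Tags | from_entries | (.Name == "prod" and .Team == "infra") or (.Env == "dev"))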
Example #6
def get_elbs(subnet):
    # ELBs
    elb_instances = query_aws(subnet.account, "elb-describe-load-balancers", subnet.region)
    elb_resource_filter = '.LoadBalancerDescriptions[] | select(.VPCId == "{}") | select(.Subnets[] == "{}")'
    elbs = pyjq.all(elb_resource_filter.format(subnet.vpc.local_id, subnet.local_id), elb_instances)

    # ALBs and NLBs
    alb_instances = query_aws(subnet.account, "elbv2-describe-load-balancers", subnet.region)
    alb_resource_filter = '.LoadBalancers[] | select(.VpcId == "{}") | select(.AvailabilityZones[].SubnetId == "{}")'
    albs = pyjq.all(alb_resource_filter.format(subnet.vpc.local_id, subnet.local_id), alb_instances)

    return elbs + albs
Example #7
def audit_rds(region):
    json_blob = query_aws(region.account, "rds-describe-db-instances", region)
    for instance in json_blob.get('DBInstances', []):
        if instance['PubliclyAccessible']:
            print('- RDS instance in {} is public: {}'.format(region.name, instance['DBInstanceIdentifier']))
        if instance.get('DBSubnetGroup', {}).get('VpcId', '') == '':
            print('- RDS instance in {} is in VPC classic: {}'.format(region.name, instance['DBInstanceIdentifier']))
Example #8
def find_unused_volumes(region):
    unused_volumes = []
    volumes = query_aws(region.account, "ec2-describe-volumes", region)
    for volume in pyjq.all('.Volumes[]|select(.State=="available")', volumes):
        unused_volumes.append({"id": volume["VolumeId"]})

    return unused_volumes
Example #9
def audit_es(findings, region):
    json_blob = query_aws(region.account, 'es-list-domain-names', region)
    for domain in json_blob.get('DomainNames', []):
        name = domain['DomainName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'es',
                                              'describe-elasticsearch-domain',
                                              name)
        # Find the entity we need
        policy_string = policy_file_json['DomainStatus']['AccessPolicies']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)

        # ES clusters are either public, with an "Endpoint" (singular), which is bad, or
        # they are VPC-only, in which case they have an "Endpoints" (plural) map containing a "vpc" element
        if policy_file_json['DomainStatus'].get(
                'Endpoint', '') != '' or policy_file_json['DomainStatus'].get(
                    'Endpoints', {}).get('vpc', '') == '':
            if policy.is_internet_accessible():
                findings.add(
                    Finding(region,
                            'ES_PUBLIC',
                            name,
                            resource_details=policy_string))
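A toy illustration (invented domain names, not from the original) of the Endpoint/Endpoints distinction the condition above relies on:

public_status = {'Endpoint': 'search-example-abc.us-east-1.es.amazonaws.com'}
vpc_status = {'Endpoints': {'vpc': 'vpc-search-example-abc.us-east-1.es.amazonaws.com'}}

def may_be_public(domain_status):
    # Mirrors the check above: a non-empty "Endpoint", or no "vpc" endpoint at all
    return (domain_status.get('Endpoint', '') != ''
            or domain_status.get('Endpoints', {}).get('vpc', '') == '')

print(may_be_public(public_status), may_be_public(vpc_status))  # True False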
Example #10
def audit_password_policy(findings, region):
    json_blob = query_aws(region.account, "iam-get-account-password-policy", region)
    if json_blob is None or json_blob.get('PasswordPolicy', {}) == {}:
        findings.add(Finding(
            region,
            'PASSWORD_POLICY_NOT_SET',
            None,
            None))
    else:
        if json_blob['PasswordPolicy'].get('MinimumPasswordLength', 0) < 12:
            findings.add(Finding(
                region,
                'PASSWORD_POLICY_CHARACTER_MINIMUM',
                None,
                resource_details={'MinimumPasswordLength': json_blob['PasswordPolicy'].get('MinimumPasswordLength', 0)}))

        lacking_character_requirements = []
        if not json_blob['PasswordPolicy'].get('RequireNumbers', False):
            lacking_character_requirements.append('RequireNumbers')
        if not json_blob['PasswordPolicy'].get('RequireSymbols', False):
            lacking_character_requirements.append('RequireSymbols')
        if not json_blob['PasswordPolicy'].get('RequireLowercaseCharacters', False):
            lacking_character_requirements.append('RequireLowercaseCharacters')
        if not json_blob['PasswordPolicy'].get('RequireUppercaseCharacters', False):
            lacking_character_requirements.append('RequireUppercaseCharacters')
        if len(lacking_character_requirements) > 0:
            findings.add(Finding(
                region,
                'PASSWORD_POLICY_CHARACTER_SET_REQUIREMENTS',
                None,
                resource_details={'Policy lacks': lacking_character_requirements}))
Example #11
def get_iam_trusts(account, nodes, connections, connections_to_get):
    # Get IAM
    iam = query_aws(
        account,
        "iam-get-account-authorization-details",
        Region(account, {'RegionName':'us-east-1'}))

    for role in pyjq.all('.RoleDetailList[]', iam):
        principals = pyjq.all('.AssumeRolePolicyDocument.Statement[].Principal', role)
        for principal in principals:
            assume_role_nodes = set()
            if principal.get('Federated', None):
                # TODO I should be using get-saml-provider to confirm this is really okta
                if "saml-provider/okta" in principal['Federated'].lower():
                    node = Account(json_blob={'id':'okta', 'name':'okta', 'type':'Okta'})
                    assume_role_nodes.add(node)
                elif "saml-provider/adfs" in principal['Federated'].lower():
                    node = Account(json_blob={'id':'adfs', 'name':'adfs', 'type':'ADFS'})
                    assume_role_nodes.add(node)
                elif principal['Federated'] == 'cognito-identity.amazonaws.com':
                    # TODO: Should show this somehow
                    continue
                elif principal['Federated'] == 'www.amazon.com':
                    node = Account(json_blob={'id':'Amazon.com', 'name':'Amazon.com', 'type':'Amazon'})
                    continue
                else:
                    raise Exception('Unknown federation provider: {}'.format(principal['Federated']))
            if principal.get('AWS', None):
                principal = principal['AWS']
                if not isinstance(principal, list):
                    principal = [principal]
                for p in principal:
                    if "arn:aws" not in p:
                        # The role can simply be something like "AROA..."
                        continue
                    parts = p.split(':')
                    account_id = parts[4]
                    assume_role_nodes.add(Account(account_id=account_id))

            for node in assume_role_nodes:
                if nodes.get(node.id, None) is None:
                    nodes[node.id] = node
                access_type = 'iam'
                # TODO: Identify all admins better.  Use code from find_admins.py
                for m in role['AttachedManagedPolicies']:
                    for p in pyjq.all('.Policies[]', iam):
                        if p['Arn'] == m['PolicyArn']:
                            for policy_doc in p['PolicyVersionList']:
                                if policy_doc['IsDefaultVersion']:
                                    if is_admin_policy(policy_doc['Document']):
                                        access_type = 'admin'
                for policy in role['RolePolicyList']:
                    policy_doc = policy['PolicyDocument']
                    if is_admin_policy(policy_doc):
                        access_type = 'admin'

                if ((access_type == 'admin' and connections_to_get['admin']) or
                        (access_type != 'admin' and connections_to_get['iam_nonadmin'])):
                    connections[Connection(node, account, access_type)] = []
    return
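The account-id extraction in the AWS-principal branch above is a plain split on colons; the ARN below is invented for illustration:

p = 'arn:aws:iam::123456789012:role/SomeRole'  # invented principal ARN
parts = p.split(':')
print(parts[4])  # 123456789012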
Example #12
File: audit.py  Project: theshz/cloudmapper
def audit_cloudfront(region):
    json_blob = query_aws(region.account, 'cloudfront-list-distributions',
                          region)

    # Ignore cert issues, as urlopen doesn't understand '*.s3.amazonaws.com'
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    for distribution in json_blob.get('DistributionList', {}).get('Items', []):
        if not distribution['Enabled']:
            continue

        minimum_protocol_version = distribution.get('ViewerCertificate', {}) \
            .get('MinimumProtocolVersion', '')
        if minimum_protocol_version == 'SSLv3':
            print(
                '- CloudFront is using insecure minimum protocol version {} for {} in {}'
                .format(minimum_protocol_version, distribution['DomainName'],
                        region.name))

        domain = distribution['DomainName']

        # TODO: Not sure if this works.  I'm trying to see if I can access the cloudfront distro,
        # or if I get a 403
        # This is from https://github.com/MindPointGroup/cloudfrunt/blob/master/cloudfrunt.py
        try:
            urllib.request.urlopen('https://' + domain, context=ctx)
        except urllib.error.HTTPError as e:
            if e.code == 403 and 'Bad request' in str(e.fp.read()):
                print('- CloudFront distribution {} is missing origin'.format(
                    distribution['DomainName']))
Example #13
def find_unused_elastic_ips(region):
    unused_ips = []
    ips = query_aws(region.account, "ec2-describe-addresses", region)
    for ip in pyjq.all(".Addresses[] | select(.AssociationId == null)", ips):
        unused_ips.append({"id": ip["AllocationId"], "ip": ip["PublicIp"]})

    return unused_ips
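A self-contained sketch of the jq null-check used above, run against an invented describe-addresses payload; only the address without an AssociationId matches:

import pyjq

ips = {'Addresses': [
    {'AllocationId': 'eipalloc-1', 'PublicIp': '203.0.113.10', 'AssociationId': 'eipassoc-1'},
    {'AllocationId': 'eipalloc-2', 'PublicIp': '203.0.113.11'},  # unassociated
]}
print(pyjq.all('.Addresses[] | select(.AssociationId == null)', ips))
# [{'AllocationId': 'eipalloc-2', 'PublicIp': '203.0.113.11'}]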
Example #14
def get_account_stats(account):
    """Returns stats for an account"""
    account = Account(None, account)
    log_debug('Collecting stats in account {} ({})'.format(account.name, account.local_id))

    # Init stats to {}
    stats = OrderedDict()
    for k in resources:
        stats[k] = {}

    for region_json in get_regions(account):
        region = Region(account, region_json)

        for key, resource in resources.items():
            # Skip global services (just CloudFront)
            if ('region' in resource) and (resource['region'] != region.name):
                continue

            # Check exceptions that require special code to perform the count
            if key == 'route53_record':
                path = 'account-data/{}/{}/{}'.format(
                    account.name,
                    region.name,
                    'route53-list-resource-record-sets')
                if os.path.isdir(path):
                    stats[key][region.name] = 0
                    for f in listdir(path):
                        json_data = json.load(open(os.path.join(path, urllib.parse.quote_plus(f))))
                        stats[key][region.name] += sum(pyjq.all('.ResourceRecordSets|length', json_data))
            else:
                # Normal path
                stats[key][region.name] = sum(pyjq.all(resource['query'], 
                    query_aws(region.account, resource['source'], region)))

    return stats
Example #15
def audit_sqs(region):
    # Check for publicly accessible sqs.
    json_blob = query_aws(region.account, "sqs-list-queues", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for queue in json_blob.get('QueueUrls', []):
        # Check policy
        attributes = get_parameter_file(region, 'sqs', 'get-queue-attributes',
                                        queue)
        if attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            print('- Internet accessible SQS {}: {}'.format(
                queue, policy_string))
Example #16
def audit_elb(region):
    json_blob = query_aws(region.account, 'elb-describe-load-balancers',
                          region)
    for description in json_blob.get('LoadBalancerDescriptions', []):
        if len(description['Instances']) == 0:
            print('- ELB has no backend instances: {} in {}'.format(
                description['DNSName'], region.name))
Example #17
def audit_s3_buckets(region):
    buckets_json = query_aws(region.account, "s3-list-buckets", region)
    buckets = pyjq.all('.Buckets[].Name', buckets_json)
    for bucket in buckets:
        # Check policy
        try:
            policy_file_json = get_parameter_file(region, 's3',
                                                  'get-bucket-policy', bucket)
            # Find the entity we need
            policy_string = policy_file_json['Policy']
            # Load the string value as json
            policy = json.loads(policy_string)
            policy = Policy(policy)
            if policy.is_internet_accessible():
                print('- Internet accessible S3 bucket {}: {}'.format(
                    bucket, policy_string))
        except Exception as e:
            print('- Exception checking policy of S3 bucket {}: {}'.format(
                bucket, e))

        # Check ACL
        try:
            file_json = get_parameter_file(region, 's3', 'get-bucket-acl',
                                           bucket)
            for grant in file_json['Grants']:
                uri = grant['Grantee'].get('URI', "")
                if (uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
                        or uri ==
                        'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
                    ):
                    print('- Public grant to S3 bucket {}: {}'.format(
                        bucket, grant))
        except Exception as e:
            print('- Exception checking ACL of S3 bucket {}: {}; {}'.format(
                bucket, grant, e))
Example #18
def get_cidrs_for_account(account, cidrs):
    account = Account(None, account)

    # TODO Need to use CloudMapper's prepare to identify trusted IPs that are actually in use.
    for region_json in get_regions(account):
        region = Region(account, region_json)
        sg_json = query_aws(account, "ec2-describe-security-groups", region)
        sgs = pyjq.all('.SecurityGroups[]', sg_json)
        for sg in sgs:
            cidrs_seen = set()
            cidr_and_name_list = pyjq.all('.IpPermissions[].IpRanges[]|[.CidrIp,.Description]', sg)
            for cidr, name in cidr_and_name_list:
                if not is_external_cidr(cidr):
                    continue

                if is_unneeded_cidr(cidr):
                    print('WARNING: Unneeded cidr used {} in {}'.format(cidr, sg['GroupId']))
                    continue

                for cidr_seen in cidrs_seen:
                    if (IPNetwork(cidr_seen) in IPNetwork(cidr) or
                                IPNetwork(cidr) in IPNetwork(cidr_seen)):
                        print('WARNING: Overlapping CIDRs in {}, {} and {}'.format(sg['GroupId'], cidr, cidr_seen))
                cidrs_seen.add(cidr)

                if cidr.startswith('0.0.0.0') and not cidr.endswith('/0'):
                    print('WARNING: Unexpected CIDR for attempted public access {} in {}'.format(cidr, sg['GroupId']))
                    continue

                if cidr == '0.0.0.0/0':
                    continue

                cidrs[cidr] = cidrs.get(cidr, set())
                if name is not None:
                    cidrs[cidr].add(name)
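The overlap warning above relies on netaddr's containment test between networks; a quick sketch with made-up CIDRs:

from netaddr import IPNetwork

print(IPNetwork('10.0.1.0/24') in IPNetwork('10.0.0.0/16'))     # True: the /24 sits inside the /16
print(IPNetwork('10.0.1.0/24') in IPNetwork('192.168.0.0/24'))  # False: disjoint ranges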
Example #19
def api_endpoints(accounts, config):
    for account in accounts:
        account = Account(None, account)
        for region_json in get_regions(account):
            region = Region(account, region_json)
            
            # Look for API Gateway
            json_blob = query_aws(region.account, 'apigateway-get-rest-apis', region)
            for api in json_blob.get('items', []):
                rest_id = api['id']
                deployments = get_parameter_file(region, 'apigateway', 'get-deployments', rest_id)
                if deployments is None:
                    continue
                for deployment in deployments['items']:
                    deployment_id = deployment['id']
                    stages = get_parameter_file(region, 'apigateway', 'get-stages', rest_id)
                    if stages is None:
                        continue
                    for stage in stages['item']:
                        if stage['deploymentId'] == deployment_id:
                            resources = get_parameter_file(region, 'apigateway', 'get-resources', rest_id)
                            if resources is None:
                                continue
                            for resource in resources['items']:
                                print('{}.execute-api.{}.amazonaws.com/{}{}'.format(
                                    api['id'],
                                    region.name,
                                    stage['stageName'],
                                    resource['path']))
Example #20
def audit_rds_snapshots(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-snapshots", region)
    for snapshot in json_blob.get('DBSnapshots', []):
        try:
            file_json = get_parameter_file(region, 'rds',
                                           'describe-db-snapshot-attributes',
                                           snapshot['DBSnapshotIdentifier'])
            for attribute in file_json['DBSnapshotAttributesResult'][
                    'DBSnapshotAttributes']:
                if attribute['AttributeName'] == 'restore':
                    if "all" in attribute['AttributeValues']:
                        findings.add(
                            Finding(region,
                                    'RDS_PUBLIC_SNAPSHOT',
                                    snapshot,
                                    resource_details={
                                        'Entities allowed to restore':
                                        attribute['AttributeValues']
                                    }))
        except OSError:
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        None,
                        resource_details={
                            'location': 'Could not open RDS snapshot file',
                            'file_name': snapshot['DBSnapshotIdentifier']
                        }))
Example #21
def audit_ebs_snapshots(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-snapshots", region)
    for snapshot in json_blob['Snapshots']:
        try:
            file_json = get_parameter_file(region, 'ec2', 'describe-snapshot-attribute', snapshot['SnapshotId'])
            if file_json is None:
                # Not technically an exception, but an unexpected situation
                findings.add(Finding(
                    region,
                    'EXCEPTION',
                    snapshot,
                    resource_details={'location': 'EBS snapshot has no attributes'}))
                continue
            for attribute in file_json['CreateVolumePermissions']:
                if attribute.get('Group', 'self') != 'self':
                    findings.add(Finding(
                        region,
                        'EBS_SNAPSHOT_PUBLIC',
                        snapshot,
                        resource_details={'Entities allowed to restore': attribute['Group']}))
        except OSError:
            findings.add(Finding(
                region,
                'EXCEPTION',
                None,
                resource_details={
                    'location': 'Could not open EBS snapshot file',
                    'file_name': snapshot['SnapshotId']}))
Example #22
def audit_redshift(findings, region):
    json_blob = query_aws(region.account, "redshift-describe-clusters", region)
    for cluster in json_blob.get('Clusters', []):
        if cluster['PubliclyAccessible']:
            findings.add(
                Finding(region, 'REDSHIFT_PUBLIC_IP',
                        cluster['ClusterIdentifier']))
Example #23
def audit_route53(region):
    json_blob = query_aws(region.account, "route53domains-list-domains", region)
    for domain in json_blob.get('Domains', []):
        if not domain['AutoRenew']:
            print('- Route53 domain not set to autorenew: {}'.format(domain['DomainName']))
        if not domain['TransferLock']:
            print('- Route53 domain transfer lock not set: {}'.format(domain['DomainName']))
Example #24
def audit_sns(findings, region):
    # Check for publicly accessible sns.
    json_blob = query_aws(region.account, "sns-list-topics", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for topic in json_blob.get('Topics', []):
        # Check policy
        attributes = get_parameter_file(region, 'sns', 'get-topic-attributes', topic['TopicArn'])
        if attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'SNS_PUBLIC',
                topic['TopicArn'],
                resource_details=policy_string))
Example #25
def audit_sqs(findings, region):
    # Check for publicly accessible sqs.
    json_blob = query_aws(region.account, "sqs-list-queues", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for queue in json_blob.get('QueueUrls', []):
        queue_name = queue.split("/")[-1]
        # Check policy
        queue_attributes = get_parameter_file(region, 'sqs', 'get-queue-attributes', queue)
        if queue_attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = queue_attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'SQS_PUBLIC',
                queue_name,
                resource_details=policy_string))
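The queue_name above is simply the last path segment of the queue URL; for an invented URL:

queue = 'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue'  # invented
print(queue.split('/')[-1])  # example-queue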
Example #26
def audit_kms(findings, region):
    # Check for publicly accessible KMS keys.
    json_blob = query_aws(region.account, "kms-list-keys", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for key in json_blob.get('Keys', []):
        name = key['KeyId']

        # Check policy
        policy_file_json = get_parameter_file(region, 'kms', 'get-key-policy', name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json['Policy']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'KMS_PUBLIC',
                name,
                resource_details=policy_string))
Example #27
def audit_glacier(findings, region):
    # Check for publicly accessible vaults.
    json_blob = query_aws(region.account, "glacier-list-vaults", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for vault in json_blob.get('VaultList', []):
        name = vault['VaultName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'glacier', 'get-vault-access-policy', name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json['policy']['Policy']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'GLACIER_PUBLIC',
                name,
                resource_details=policy_string))
Example #28
def find_unused_security_groups(region):
    # Get the defined security groups, then find all the Security Groups associated with the
    # ENIs.  Then diff these to find the unused Security Groups.
    used_sgs = set()

    defined_sgs = query_aws(region.account, "ec2-describe-security-groups",
                            region)

    network_interfaces = query_aws(region.account,
                                   "ec2-describe-network-interfaces", region)

    defined_sg_set = {}

    for sg in pyjq.all(".SecurityGroups[]", defined_sgs):
        defined_sg_set[sg["GroupId"]] = sg

    for used_sg in pyjq.all(".NetworkInterfaces[].Groups[].GroupId",
                            network_interfaces):
        used_sgs.add(used_sg)

    # Get the data from the `prepare` command
    outputfilter = {
        "internal_edges": True,
        "read_replicas": True,
        "inter_rds_edges": True,
        "azs": False,
        "collapse_by_tag": None,
        "collapse_asgs": True,
        "mute": True,
    }
    nodes = get_resource_nodes(region, outputfilter)

    for _, node in nodes.items():
        used_sgs.update(node.security_groups)

    unused_sg_ids = set(defined_sg_set) - used_sgs
    unused_sgs = []
    for sg_id in unused_sg_ids:
        unused_sgs.append({
            "id": sg_id,
            "name": defined_sg_set[sg_id]["GroupName"],
            "description": defined_sg_set[sg_id].get("Description", ""),
        })
    return unused_sgs
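The unused-group detection above reduces to a set difference between the defined group ids and the ids referenced by ENIs or graph nodes; with invented ids:

defined_sg_set = {'sg-aaa': {'GroupName': 'web'}, 'sg-bbb': {'GroupName': 'legacy'}}
used_sgs = {'sg-aaa'}
print(set(defined_sg_set) - used_sgs)  # {'sg-bbb'}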
Example #29
def get_vpcs(region, outputfilter):
    vpc_filter = ""
    if "vpc-ids" in outputfilter:
        vpc_filter += " | select (.VpcId | contains({}))".format(outputfilter["vpc-ids"])
    if "vpc-names" in outputfilter:
        vpc_filter += ' | select(.Tags != null) | select (.Tags[] | (.Key == "Name") and (.Value | contains({})))'.format(outputfilter["vpc-names"])
    vpcs = query_aws(region.account, "ec2-describe-vpcs", region)
    return pyjq.all('.Vpcs[]{}'.format(vpc_filter), vpcs)
Example #30
def audit_amis(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-images", region)
    for image in json_blob.get('Images', []):
        if image['Public']:
            findings.add(Finding(
                region,
                'AMI_PUBLIC',
                image['ImageId']))