Example no. 1
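All of the examples in this section rely on the same imports and project-local helpers. A minimal header sketch follows, assuming the stdlib modules and the pyjq package; the project helpers themselves (query_aws, get_parameter_file, get_regions, Account, Region, Policy, Finding) are assumed to be importable from the surrounding project and are not reproduced here:

import datetime  # date arithmetic on access-advisor timestamps (Example no. 1)
import json      # resource policies arrive as JSON strings
import pyjq      # jq-style filtering of the collected JSON (pyjq.all / pyjq.first)

# Project-local helpers used throughout: query_aws, get_parameter_file,
# get_regions, Account, Region, Policy, Finding. Their module paths depend on
# the surrounding project and are intentionally not guessed at here.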
def get_access_advisor(region, principal_stats, json_account_auth_details,
                       args):
    for principal_auth in [
            *json_account_auth_details['UserDetailList'],
            *json_account_auth_details['RoleDetailList']
    ]:
        stats = {}
        stats['auth'] = principal_auth
        job_id = get_parameter_file(region, 'iam',
                                    'generate-service-last-accessed-details',
                                    principal_auth['Arn'])['JobId']
        json_last_access_details = get_parameter_file(
            region, 'iam', 'get-service-last-accessed-details', job_id)
        stats['last_access'] = json_last_access_details

        stats['is_inactive'] = True

        job_completion_date = datetime.datetime.strptime(
            json_last_access_details['JobCompletionDate'][0:10], '%Y-%m-%d')

        for service in json_last_access_details['ServicesLastAccessed']:
            if 'LastAuthenticated' in service:
                last_access_date = datetime.datetime.strptime(
                    service['LastAuthenticated'][0:10], '%Y-%m-%d')
                service['days_since_last_use'] = (job_completion_date -
                                                  last_access_date).days
                if service['days_since_last_use'] < args.max_age:
                    stats['is_inactive'] = False
                    break

        principal_stats[principal_auth['Arn']] = stats
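
A minimal usage sketch for the function above; the driver variables are hypothetical, and only the structure of principal_stats is taken from the function itself:

principal_stats = {}
get_access_advisor(region, principal_stats, json_account_auth_details, args)
for arn, stats in principal_stats.items():
    if stats['is_inactive']:
        # No service was used within args.max_age days of the report date.
        print('Inactive principal: {}'.format(arn))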
Example no. 2
def audit_s3_buckets(region):
    buckets_json = query_aws(region.account, "s3-list-buckets", region)
    buckets = pyjq.all('.Buckets[].Name', buckets_json)
    for bucket in buckets:
        # Check policy
        try:
            policy_file_json = get_parameter_file(region, 's3',
                                                  'get-bucket-policy', bucket)
            # Find the entity we need
            policy_string = policy_file_json['Policy']
            # Load the string value as json
            policy = json.loads(policy_string)
            policy = Policy(policy)
            if policy.is_internet_accessible():
                print('- Internet accessible S3 bucket {}: {}'.format(
                    bucket, policy_string))
        except Exception as e:
            print('- Exception checking policy of S3 bucket {}: {}; {}'.format(
                bucket, policy_string, e))

        # Check ACL
        try:
            file_json = get_parameter_file(region, 's3', 'get-bucket-acl',
                                           bucket)
            for grant in file_json['Grants']:
                uri = grant['Grantee'].get('URI', "")
                if (uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
                        or uri ==
                        'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
                    ):
                    print('- Public grant to S3 bucket {}: {}'.format(
                        bucket, grant))
        except Exception as e:
            print('- Exception checking ACL of S3 bucket {}: {}; {}'.format(
                bucket, grant, e))
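
The is_internet_accessible() check used here and throughout the later examples targets policies that grant anonymous access; a statement of roughly this shape (illustrative JSON, not taken from the source) is the canonical case it is meant to flag:

public_policy_example = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": "*",  # anonymous principal, no restricting Condition
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::example-bucket/*"
    }]
}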
Example no. 3
def api_endpoints(accounts, config):
    for account in accounts:
        account = Account(None, account)
        for region_json in get_regions(account):
            region = Region(account, region_json)
            
            # Look for API Gateway
            json_blob = query_aws(region.account, 'apigateway-get-rest-apis', region)
            for api in json_blob.get('items', []):
                rest_id = api['id']
                deployments = get_parameter_file(region, 'apigateway', 'get-deployments', rest_id)
                if deployments is None:
                    continue
                for deployment in deployments['items']:
                    deployment_id = deployment['id']
                    stages = get_parameter_file(region, 'apigateway', 'get-stages', rest_id)
                    if stages is None:
                        continue
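                    # Note: the get-stages response keys its stage list under 'item' (singular).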
                    for stage in stages['item']:
                        if stage['deploymentId'] == deployment_id:
                            resources = get_parameter_file(region, 'apigateway', 'get-resources', rest_id)
                            if resources is None:
                                continue
                            for resource in resources['items']:
                                print('{}.execute-api.{}.amazonaws.com/{}{}'.format(
                                    api['id'],
                                    region.name,
                                    stage['stageName'],
                                    resource['path']))
Example no. 4
def get_elbs(subnet, outputfilter):
    # ELBs
    elb_instances = query_aws(subnet.account, "elb-describe-load-balancers",
                              subnet.region)
    elb_resource_filter = '.LoadBalancerDescriptions[] | select(.VPCId == "{}") | select(.Subnets[] == "{}")'
    elbs = pyjq.all(
        elb_resource_filter.format(subnet.vpc.local_id, subnet.local_id),
        elb_instances)

    # ALBs and NLBs
    alb_instances = query_aws(subnet.account, "elbv2-describe-load-balancers",
                              subnet.region)
    alb_resource_filter = '.LoadBalancers[] | select(.VpcId == "{}") | select(.AvailabilityZones[].SubnetId == "{}")'
    albs = pyjq.all(
        alb_resource_filter.format(subnet.vpc.local_id, subnet.local_id),
        alb_instances)

    if 'tags' not in outputfilter:
        return elbs + albs

    # There are tags requested, so we need to filter these
    tag_filter = ""
    tag_set_conditions = []
    for tag_set in outputfilter.get("tags", []):
        conditions = [c.split("=") for c in tag_set.split(",")]
        condition_queries = []
        for pair in conditions:
            if len(pair) == 2:
                condition_queries.append('.{} == "{}"'.format(
                    pair[0], pair[1]))
        tag_set_conditions.append('(' + ' and '.join(condition_queries) + ')')
    tag_filter = 'select(.TagDescriptions[0].Tags | from_entries | ' + ' or '.join(
        tag_set_conditions) + ')'

    filtered_elbs = []
    for elb in elbs:
        tags = get_parameter_file(subnet.region, 'elb', 'describe-tags',
                                  elb['LoadBalancerName'])
        if tags is None:
            continue

        if pyjq.first(tag_filter, tags) is not None:
            filtered_elbs.append(elb)

    for elb in albs:
        tags = get_parameter_file(subnet.region, 'elbv2', 'describe-tags',
                                  elb['LoadBalancerArn'])
        if tags is None:
            continue

        if pyjq.first(tag_filter, tags) is not None:
            filtered_elbs.append(elb)

    return filtered_elbs
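
To make the tag filtering above concrete: an outputfilter such as {'tags': ['Team=web,Env=prod', 'Team=api']} (hypothetical values) makes the loop build a jq filter equivalent to the following, which keeps load balancers matching either tag set:

tag_filter = ('select(.TagDescriptions[0].Tags | from_entries | '
              '(.Team == "web" and .Env == "prod") or (.Team == "api"))')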
Example no. 5
def audit_s3_buckets(findings, region):
    buckets_json = query_aws(region.account, "s3-list-buckets", region)
    buckets = pyjq.all('.Buckets[].Name', buckets_json)
    for bucket in buckets:
        # Check policy
        try:
            policy_file_json = get_parameter_file(region, 's3', 'get-bucket-policy', bucket)
            if policy_file_json is not None:
                # Find the entity we need
                policy_string = policy_file_json['Policy']
                # Load the string value as json
                policy = json.loads(policy_string)
                policy = Policy(policy)
                if policy.is_internet_accessible():
                    if len(policy.statements) == 1 and len(policy.statements[0].actions) == 1 and 's3:GetObject' in policy.statements[0].actions:
                        findings.add(Finding(
                            region,
                            'S3_PUBLIC_POLICY_GETOBJECT_ONLY',
                            bucket))
                    else:
                        findings.add(Finding(
                            region,
                            'S3_PUBLIC_POLICY',
                            bucket,
                            resource_details=policy_string))
        except Exception as e:
            findings.add(Finding(
                region,
                'EXCEPTION',
                bucket,
                resource_details={'policy': policy_string, 'exception': e, 'location': 'Exception checking policy of S3 bucket'}))
        # Check ACL
        try:
            file_json = get_parameter_file(region, 's3', 'get-bucket-acl', bucket)
            for grant in file_json['Grants']:
                uri = grant['Grantee'].get('URI', "")
                if (uri == 'http://acs.amazonaws.com/groups/global/AllUsers' or
                        uri == 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'):
                    findings.add(Finding(
                        region,
                        'S3_PUBLIC_ACL',
                        bucket,
                        resource_details=grant))
        except Exception as e:
            findings.add(Finding(
                region,
                'EXCEPTION',
                bucket,
                resource_details={'grant': grant, 'exception': e, 'location': 'Exception checking ACL of S3 bucket'}))
Example no. 6
def audit_sqs(region):
    # Check for publicly accessible sqs.
    json_blob = query_aws(region.account, "sqs-list-queues", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for queue in json_blob.get('QueueUrls', []):
        # Check policy
        attributes = get_parameter_file(region, 'sqs', 'get-queue-attributes',
                                        queue)
        if attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            print('- Internet accessible SQS {}: {}'.format(
                queue, policy_string))
Example no. 7
def audit_es(findings, region):
    json_blob = query_aws(region.account, 'es-list-domain-names', region)
    for domain in json_blob.get('DomainNames', []):
        name = domain['DomainName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'es',
                                              'describe-elasticsearch-domain',
                                              name)
        # Find the entity we need
        policy_string = policy_file_json['DomainStatus']['AccessPolicies']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)

        # ES clusters are either public, with an "Endpoint" (singular), which is bad, or
        # they are VPC-only, in which case they have an "Endpoints" (plural) array containing a "vpc" element
        if policy_file_json['DomainStatus'].get(
                'Endpoint', '') != '' or policy_file_json['DomainStatus'].get(
                    'Endpoints', {}).get('vpc', '') == '':
            if policy.is_internet_accessible():
                findings.add(
                    Finding(region,
                            'ES_PUBLIC',
                            name,
                            resource_details=policy_string))
Example no. 8
def audit_sns(findings, region):
    # Check for publicly accessible sns.
    json_blob = query_aws(region.account, "sns-list-topics", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for topic in json_blob.get('Topics', []):
        # Check policy
        attributes = get_parameter_file(region, 'sns', 'get-topic-attributes', topic['TopicArn'])
        if attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'SNS_PUBLIC',
                topic['TopicArn'],
                resource_details=policy_string))
Example no. 9
def audit_sqs(findings, region):
    # Check for publicly accessible sqs.
    json_blob = query_aws(region.account, "sqs-list-queues", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for queue in json_blob.get('QueueUrls', []):
        queue_name = queue.split("/")[-1]
        # Check policy
        queue_attributes = get_parameter_file(region, 'sqs', 'get-queue-attributes', queue)
        if queue_attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = queue_attributes['Attributes']
        if 'Policy' in attributes:
            policy_string = attributes['Policy']
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'SQS_PUBLIC',
                queue_name,
                resource_details=policy_string))
Example no. 10
def audit_glacier(findings, region):
    # Check for publicly accessible vaults.
    json_blob = query_aws(region.account, "glacier-list-vaults", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for vault in json_blob.get('VaultList', []):
        name = vault['VaultName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'glacier', 'get-vault-access-policy', name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json['policy']['Policy']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'GLACIER_PUBLIC',
                name,
                resource_details=policy_string))
Example no. 11
def audit_kms(findings, region):
    # Check for publicly accessible KMS keys.
    json_blob = query_aws(region.account, "kms-list-keys", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for key in json_blob.get('Keys', []):
        name = key['KeyId']

        # Check policy
        policy_file_json = get_parameter_file(region, 'kms', 'get-key-policy', name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json['Policy']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(Finding(
                region,
                'KMS_PUBLIC',
                name,
                resource_details=policy_string))
Example no. 12
def audit_ebs_snapshots(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-snapshots", region)
    for snapshot in json_blob['Snapshots']:
        try:
            file_json = get_parameter_file(region, 'ec2', 'describe-snapshot-attribute', snapshot['SnapshotId'])
            if file_json is None:
                # Not technically an exception, but an unexpected situation
                findings.add(Finding(
                    region,
                    'EXCEPTION',
                    snapshot,
                    resource_details={'location': 'EBS snapshot has no attributes'}))
                continue
            for attribute in file_json['CreateVolumePermissions']:
                if attribute.get('Group', 'self') != 'self':
                    findings.add(Finding(
                        region,
                        'EBS_SNAPSHOT_PUBLIC',
                        snapshot,
                        resource_details={'Entities allowed to restore': attribute['Group']}))
        except OSError:
            findings.add(Finding(
                region,
                'EXCEPTION',
                None,
                resource_details={
                    'location': 'Could not open EBS snapshot file',
                    'snapshot_id': snapshot['SnapshotId']}))
Example no. 13
def audit_rds_snapshots(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-snapshots", region)
    for snapshot in json_blob.get('DBSnapshots', []):
        try:
            file_json = get_parameter_file(region, 'rds',
                                           'describe-db-snapshot-attributes',
                                           snapshot['DBSnapshotIdentifier'])
            for attribute in file_json['DBSnapshotAttributesResult'][
                    'DBSnapshotAttributes']:
                if attribute['AttributeName'] == 'restore':
                    if "all" in attribute['AttributeValues']:
                        findings.add(
                            Finding(region,
                                    'RDS_PUBLIC_SNAPSHOT',
                                    snapshot,
                                    resource_details={
                                        'Entities allowed to restore':
                                        attribute['AttributeValues']
                                    }))
        except OSError:
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        None,
                        resource_details={
                            'location': 'Could not open RDS snapshot file',
                            'snapshot_id': snapshot['DBSnapshotIdentifier']
                        }))
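
For reference, a publicly shared snapshot appears in the collected data roughly as follows (illustrative, limited to the keys the loop above reads):

file_json = {
    'DBSnapshotAttributesResult': {
        'DBSnapshotAttributes': [
            {'AttributeName': 'restore', 'AttributeValues': ['all']}
        ]
    }
}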
Example no. 14
def audit_ebs_snapshots(region):
    json_blob = query_aws(region.account, "ec2-describe-snapshots", region)
    for snapshot in json_blob['Snapshots']:
        try:
            file_json = get_parameter_file(region, 'ec2', 'describe-snapshot-attribute', snapshot['SnapshotId'])
            for attribute in file_json['CreateVolumePermissions']:
                if attribute.get('Group', 'self') != 'self':
                    print('- EBS snapshot in {} is public: {}, entities allowed to restore: {}'.format(region.name, snapshot, attribute['Group']))
        except OSError:
            print('WARNING: Could not open snapshot attribute file for {}'.format(snapshot['SnapshotId']))
Example no. 15
def audit_rds_snapshots(region):
    json_blob = query_aws(region.account, "rds-describe-db-snapshots", region)
    for snapshot in json_blob.get('DBSnapshots', []):
        try:
            file_json = get_parameter_file(region, 'rds', 'describe-db-snapshot-attributes', snapshot['DBSnapshotIdentifier'])
            for attribute in file_json['DBSnapshotAttributesResult']['DBSnapshotAttributes']:
                if attribute['AttributeName'] == 'restore':
                    if "all" in attribute['AttributeValues']:
                        print('- RDS snapshot in {} is public: {}, entities allowed to restore: {}'.format(region.name, snapshot, attribute['AttributeValues']))
        except OSError:
            print('WARNING: Could not open snapshot attribute file for {}'.format(snapshot['DBSnapshotIdentifier']))
Example no. 16
def audit_ecr_repos(region):
    json_blob = query_aws(region.account, "ecr-describe-repositories", region)
    for repo in json_blob.get('repositories', []):
        name = repo['repositoryName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'ecr', 'get-repository-policy', name)
        # Find the entity we need
        policy_string = policy_file_json['policyText']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            print('- Internet accessible ECR repo {}: {}'.format(name, policy_string))
Example no. 17
def audit_s3_block_policy(region):
    caller_identity_json = query_aws(region.account, "sts-get-caller-identity",
                                     region)
    block_policy_json = get_parameter_file(region, 's3control',
                                           'get-public-access-block',
                                           caller_identity_json['Account'])
    if block_policy_json is None:
        print('- S3 Control Access Block is not on')
    else:
        conf = block_policy_json['PublicAccessBlockConfiguration']
        if not conf['BlockPublicAcls'] or not conf[
                'BlockPublicPolicy'] or not conf[
                    'IgnorePublicAcls'] or not conf['RestrictPublicBuckets']:
            print('- S3 Control Access Block is not blocking all access: {}'.
                  format(block_policy_json))
Example no. 18
def audit_s3_block_policy(findings, region):
    caller_identity_json = query_aws(region.account, "sts-get-caller-identity", region)
    block_policy_json = get_parameter_file(region, 's3control', 'get-public-access-block', caller_identity_json['Account'])
    if block_policy_json is None:
        findings.add(Finding(
            region,
            'S3_ACCESS_BLOCK_OFF',
            None))
    else:
        conf = block_policy_json['PublicAccessBlockConfiguration']
        if not conf['BlockPublicAcls'] or not conf['BlockPublicPolicy'] or not conf['IgnorePublicAcls'] or not conf['RestrictPublicBuckets']:
            findings.add(Finding(
                region,
                'S3_ACCESS_BLOCK_ALL_ACCESS_TYPES',
                None,
                resource_details=block_policy_json))
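
For reference, the configuration both versions of this check read looks roughly like this when every block is enabled (illustrative):

block_policy_json = {
    'PublicAccessBlockConfiguration': {
        'BlockPublicAcls': True,
        'IgnorePublicAcls': True,
        'BlockPublicPolicy': True,
        'RestrictPublicBuckets': True,
    }
}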
Example no. 19
def audit_guardduty(findings, region):
    for region_json in get_regions(region.account):
        region = Region(region.account, region_json)
        detector_list_json = query_aws(region.account,
                                       "guardduty-list-detectors", region)
        if not detector_list_json:
            # GuardDuty must not exist in this region (or the collected data is old)
            continue
        is_enabled = False
        for detector in detector_list_json['DetectorIds']:
            detector_json = get_parameter_file(region, 'guardduty',
                                               'get-detector', detector)
            if detector_json['Status'] == 'ENABLED':
                is_enabled = True
        if not is_enabled:
            findings.add(Finding(region, 'GUARDDUTY_OFF', None, None))
Example no. 20
def audit_lambda(region):
    # Check for publicly accessible functions.  They should be called from apigateway or something else.
    json_blob = query_aws(region.account, "lambda-list-functions", region)
    for function in json_blob.get('Functions', []):
        name = function['FunctionName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'lambda', 'get-policy', name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json['Policy']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            print('- Internet accessible Lambda {}: {}'.format(name, policy_string))
Example no. 21
def audit_ecr_repos(region):
    json_blob = query_aws(region.account, "ecr-describe-repositories", region)
    for repo in json_blob.get('repositories', []):
        name = repo['repositoryName']

        # Check policy
        policy_file_json = get_parameter_file(region, 'ecr',
                                              'get-repository-policy', name)
        if policy_file_json is None:
            # This means only the owner can access the repo, so this is fine.
            # The collect command would have received the exception
            # `RepositoryPolicyNotFoundException` for this to happen.
            continue
        # Find the entity we need
        policy_string = policy_file_json['policyText']
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            print('- Internet accessible ECR repo {}: {}'.format(
                name, policy_string))
Example no. 22
def audit_guardduty(region):
    regions_without = []
    possible_regions = 0
    for region_json in get_regions(region.account):
        region = Region(region.account, region_json)
        detector_list_json = query_aws(region.account,
                                       "guardduty-list-detectors", region)
        if not detector_list_json:
            # GuardDuty must not exist in this region (or the collected data is old)
            continue
        possible_regions += 1
        is_enabled = False
        for detector in detector_list_json['DetectorIds']:
            detector_json = get_parameter_file(region, 'guardduty',
                                               'get-detector', detector)
            if detector_json['Status'] == 'ENABLED':
                is_enabled = True
        if not is_enabled:
            regions_without.append(region.name)
    if len(regions_without) != 0:
        print('- GuardDuty not turned on for {}/{} regions: {}'.format(
            len(regions_without), possible_regions, regions_without))
Example no. 23
def get_rds_instances(subnet, outputfilter):
    instances = query_aws(subnet.account, "rds-describe-db-instances",
                          subnet.region)
    resource_filter = '.DBInstances[] | select(.DBSubnetGroup.Subnets != null and .DBSubnetGroup.Subnets[].SubnetIdentifier  == "{}")'
    rds_instances = pyjq.all(resource_filter.format(subnet.local_id),
                             instances)

    if 'tags' not in outputfilter:
        return rds_instances

    # There are tags requested, so we need to filter these
    tag_filter = ""
    tag_set_conditions = []
    for tag_set in outputfilter.get("tags", []):
        conditions = [c.split("=") for c in tag_set.split(",")]
        condition_queries = []
        for pair in conditions:
            if len(pair) == 2:
                condition_queries.append('.{} == "{}"'.format(
                    pair[0], pair[1]))
        tag_set_conditions.append('(' + ' and '.join(condition_queries) + ')')
    tag_filter = 'select(.TagList | from_entries | ' + ' or '.join(
        tag_set_conditions) + ')'

    filtered_instances = []
    for rds in rds_instances:
        tags = get_parameter_file(subnet.region, 'rds',
                                  'list-tags-for-resource',
                                  rds['DBInstanceArn'])
        if tags is None:
            continue

        if pyjq.first(tag_filter, tags) is not None:
            filtered_instances.append(rds)

    return filtered_instances