Example #1
def audit_rds_snapshots(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-snapshots", region)
    for snapshot in json_blob.get("DBSnapshots", []):
        try:
            file_json = get_parameter_file(
                region,
                "rds",
                "describe-db-snapshot-attributes",
                snapshot["DBSnapshotIdentifier"],
            )
            for attribute in file_json["DBSnapshotAttributesResult"][
                    "DBSnapshotAttributes"]:
                if attribute["AttributeName"] == "restore":
                    if "all" in attribute["AttributeValues"]:
                        findings.add(
                            Finding(
                                region,
                                "RDS_PUBLIC_SNAPSHOT",
                                snapshot,
                                resource_details={
                                    "Entities allowed to restore":
                                    attribute["AttributeValues"]
                                },
                            ))
        except OSError:
            findings.add(
                Finding(
                    region,
                    "EXCEPTION",
                    None,
                    resource_details={
                        "location": "Could not open RDS snapshot file",
                        "file_name": file_name,
                    },
                ))
Example #2
def audit_lightsail(findings, region):
    # Just check if lightsail is in use
    json_blob = query_aws(region.account, "lightsail-get-instances", region)
    if json_blob is None:
        # Service not supported in the region
        return
    if len(json_blob.get("instances", [])) > 0:
        findings.add(
            Finding(
                region,
                "LIGHTSAIL_IN_USE",
                None,
                resource_details={
                    "instance count": len(json_blob["instances"])
                },
            ))

    json_blob = query_aws(region.account, "lightsail-get-load-balancers",
                          region)
    if json_blob is None:
        # Service not supported in the region
        return
    if len(json_blob.get("loadBalancers", [])) > 0:
        findings.add(
            Finding(
                region,
                "LIGHTSAIL_IN_USE",
                None,
                resource_details={
                    "load balancer count": len(json_blob["loadBalancers"])
                },
            ))
Example #3
def audit_s3_buckets(findings, region):
    buckets_json = query_aws(region.account, "s3-list-buckets", region)
    buckets = pyjq.all('.Buckets[].Name', buckets_json)
    for bucket in buckets:
        # Check policy
        try:
            policy_file_json = get_parameter_file(region, 's3',
                                                  'get-bucket-policy', bucket)
            if policy_file_json is not None:
                # Find the entity we need
                policy_string = policy_file_json['Policy']
                # Load the string value as json
                policy = json.loads(policy_string)
                policy = Policy(policy)
                if policy.is_internet_accessible():
                    if (len(policy.statements) == 1
                            and len(policy.statements[0].actions) == 1
                            and 's3:GetObject' in policy.statements[0].actions):
                        findings.add(
                            Finding(region, 'S3_PUBLIC_POLICY_GETOBJECT_ONLY',
                                    bucket))
                    else:
                        findings.add(
                            Finding(region,
                                    'S3_PUBLIC_POLICY',
                                    bucket,
                                    resource_details=policy_string))
        except Exception as e:
            # Record the failure as a finding rather than aborting the whole audit
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        bucket,
                        resource_details={
                            'exception': str(e),
                            'location': 'Exception checking policy of S3 bucket'
                        }))
        # Check ACL
        try:
            file_json = get_parameter_file(region, 's3', 'get-bucket-acl',
                                           bucket)
            for grant in file_json['Grants']:
                uri = grant['Grantee'].get('URI', "")
                if uri in (
                        'http://acs.amazonaws.com/groups/global/AllUsers',
                        'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'):
                    findings.add(
                        Finding(region,
                                'S3_PUBLIC_ACL',
                                bucket,
                                resource_details=grant))
        except Exception as e:
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        bucket,
                        resource_details={
                            'grant': grant,
                            'exception': e,
                            'location': 'Exception checking ACL of S3 bucket'
                        }))
Example #4
def audit_ec2(findings, region):
    json_blob = query_aws(region.account, 'ec2-describe-instances', region)
    route_table_json = query_aws(region.account, 'ec2-describe-route-tables',
                                 region)

    for reservation in json_blob.get('Reservations', []):
        for instance in reservation.get('Instances', []):
            if instance.get('State', {}).get('Name', '') == 'terminated':
                # Skip terminated instances
                continue

            if 'vpc' not in instance.get('VpcId', ''):
                findings.add(
                    Finding(region, 'EC2_CLASSIC', instance['InstanceId']))

            if not instance.get('SourceDestCheck', True):
                route_to_instance = None
                for table in route_table_json['RouteTables']:
                    if table['VpcId'] == instance.get('VpcId', ''):
                        for route in table['Routes']:
                            if route.get('InstanceId',
                                         '') == instance['InstanceId']:
                                route_to_instance = route
                                break
                    if route_to_instance is not None:
                        break
                findings.add(
                    Finding(region,
                            'EC2_SOURCE_DEST_CHECK_OFF',
                            instance['InstanceId'],
                            resource_details={'routes': route_to_instance}))
Example #5
def audit_route53(findings, region):
    json_blob = query_aws(region.account, "route53domains-list-domains",
                          region)
    for domain in json_blob.get("Domains", []):
        if not domain["AutoRenew"]:
            findings.add(
                Finding(region, "DOMAIN_NOT_SET_TO_RENEW",
                        domain["DomainName"], None))
        if not domain["TransferLock"]:
            findings.add(
                Finding(region, "DOMAIN_HAS_NO_TRANSFER_LOCK",
                        domain["DomainName"], None))

    # Check VPC hosted zones
    regions_json = query_aws(region.account, "describe-regions")
    regions = pyjq.all(".Regions[].RegionName", regions_json)
    for region_name in regions:
        vpc_json = query_aws(region.account, "ec2-describe-vpcs", region_name)
        vpcs = pyjq.all(".Vpcs[]?.VpcId", vpc_json)
        for vpc in vpcs:
            hosted_zone_file = f"account-data/{region.account.name}/{region.name}/route53-list-hosted-zones-by-vpc/{region_name}/{vpc}"
            with open(hosted_zone_file) as hosted_zone_fd:
                hosted_zones_json = json.load(hosted_zone_fd)
            hosted_zones = pyjq.all(".HostedZoneSummaries[]?",
                                    hosted_zones_json)
            for hosted_zone in hosted_zones:
                if hosted_zone.get("Owner", {}).get("OwningAccount", "") != "":
                    if hosted_zone["Owner"][
                            "OwningAccount"] != region.account.local_id:
                        findings.add(
                            Finding(region, "FOREIGN_HOSTED_ZONE",
                                    hosted_zone))
Example #6
def audit_ec2(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-instances", region)
    route_table_json = query_aws(region.account, "ec2-describe-route-tables",
                                 region)

    for reservation in json_blob.get("Reservations", []):
        for instance in reservation.get("Instances", []):
            if instance.get("State", {}).get("Name", "") == "terminated":
                # Skip terminated instances
                continue

            if "vpc" not in instance.get("VpcId", ""):
                findings.add(
                    Finding(region, "EC2_CLASSIC", instance["InstanceId"]))

            if not instance.get("SourceDestCheck", True):
                route_to_instance = None
                for table in route_table_json["RouteTables"]:
                    if table["VpcId"] == instance.get("VpcId", ""):
                        for route in table["Routes"]:
                            if route.get("InstanceId",
                                         "") == instance["InstanceId"]:
                                route_to_instance = route
                                break
                    if route_to_instance is not None:
                        break
                findings.add(
                    Finding(
                        region,
                        "EC2_SOURCE_DEST_CHECK_OFF",
                        instance["InstanceId"],
                        resource_details={"routes": route_to_instance},
                    ))
Example #7
def audit_rds_snapshots(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-snapshots", region)
    for snapshot in json_blob.get('DBSnapshots', []):
        try:
            file_json = get_parameter_file(region, 'rds',
                                           'describe-db-snapshot-attributes',
                                           snapshot['DBSnapshotIdentifier'])
            for attribute in file_json['DBSnapshotAttributesResult'][
                    'DBSnapshotAttributes']:
                if attribute['AttributeName'] == 'restore':
                    if "all" in attribute['AttributeValues']:
                        findings.add(
                            Finding(region,
                                    'RDS_PUBLIC_SNAPSHOT',
                                    snapshot,
                                    resource_details={
                                        'Entities allowed to restore':
                                        attribute['AttributeValues']
                                    }))
        except OSError:
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        None,
                        resource_details={
                            'location': 'Could not open RDS snapshot file',
                            'file_name': snapshot['DBSnapshotIdentifier']
                        }))
Example #8
def audit_ec2(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-instances", region)
    route_table_json = query_aws(region.account, "ec2-describe-route-tables",
                                 region)

    for reservation in json_blob.get("Reservations", []):
        for instance in reservation.get("Instances", []):
            if instance.get("State", {}).get("Name", "") == "terminated":
                # Skip terminated instances
                continue

            # Check for old instances
            if instance.get("LaunchTime", "") != "":
                MAX_RESOURCE_AGE_DAYS = 365
                collection_date = get_collection_date(region.account)
                launch_time = instance["LaunchTime"].split(".")[0]
                age_in_days = days_between(launch_time, collection_date)
                if age_in_days > MAX_RESOURCE_AGE_DAYS:
                    findings.add(
                        Finding(
                            region,
                            "EC2_OLD",
                            instance["InstanceId"],
                            resource_details={
                                "Age in days": age_in_days,
                                "Name": get_name(instance, "InstanceId"),
                                "Tags": instance.get("Tags", {}),
                            },
                        ))

            # Check for EC2 Classic
            if "vpc" not in instance.get("VpcId", ""):
                findings.add(
                    Finding(region, "EC2_CLASSIC", instance["InstanceId"]))

            if not instance.get("SourceDestCheck", True):
                route_to_instance = None
                for table in route_table_json["RouteTables"]:
                    if table["VpcId"] == instance.get("VpcId", ""):
                        for route in table["Routes"]:
                            if route.get("InstanceId",
                                         "") == instance["InstanceId"]:
                                route_to_instance = route
                                break
                    if route_to_instance is not None:
                        break
                findings.add(
                    Finding(
                        region,
                        "EC2_SOURCE_DEST_CHECK_OFF",
                        instance["InstanceId"],
                        resource_details={
                            "routes": route_to_instance,
                            "Name": get_name(instance, "InstanceId"),
                            "Tags": instance.get("Tags", {}),
                        },
                    ))
Example #9
def is_admin_policy(
    policy_doc, location, findings, region, privs_to_look_for, include_retricted
):
    # This attempts to identify policies that directly allow admin privs, or that indirectly allow them
    # through possible privilege escalation (ex. iam:PutRolePolicy to add an admin policy to itself).
    # It is best effort: it will have false negatives (failing to flag a policy that is effectively
    # admin) and may also have false positives.
    for stmt in make_list(policy_doc["Statement"]):
        if stmt["Effect"] == "Allow":
            # Check for use of NotAction: allowing everything except a listed set of actions, with no
            # resource or condition restrictions, is effectively full access and is bad.
            not_actions = make_list(stmt.get("NotAction", []))
            if (
                not_actions != []
                and stmt.get("Resource", "") == "*"
                and stmt.get("Condition", "") == ""
            ):
                if "iam:*" in not_actions:
                    # This is used for PowerUsers, where they can do everything except IAM actions
                    return False
                findings.add(
                    Finding(
                        region,
                        "IAM_NOTACTION_ALLOW",
                        location,
                        resource_details={"Statement": stmt},
                    )
                )
                return True

            actions = make_list(stmt.get("Action", []))
            for action in actions:
                if action == "*" or action == "*:*" or action == "iam:*":
                    if stmt.get("Resource", "") != "*":
                        findings.add(
                            Finding(
                                region,
                                "IAM_UNEXPECTED_FORMAT",
                                location,
                                resource_details={
                                    "comment": "This policy is oddly allowing all actions, but is restricted to a specific resource. This is a confusing way of restricting access that may be more privileged than expected.",
                                    "statement": stmt,
                                },
                            )
                        )
                    return True
                # Look for privilege escalations
                if action_matches(action, privs_to_look_for):
                    if include_retricted:
                        return True
                    elif (
                        stmt.get("Resource", "") == "*"
                        and stmt.get("Condition", "") == ""
                    ):
                        return True

    return False
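
For illustration, here is a minimal, hypothetical policy document of the kind the NotAction check above flags; the dict and the commented call below are illustrative sketches, not code from the tool itself.

# Hypothetical policy document hitting the NotAction case: everything except
# one action is allowed on every resource, with no Condition attached, so
# is_admin_policy() records an IAM_NOTACTION_ALLOW finding and returns True.
suspicious_policy_doc = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "NotAction": ["s3:DeleteBucket"],
            "Resource": "*",
        }
    ],
}
# is_admin_policy(suspicious_policy_doc, location, findings, region,
#                 privs_to_look_for, include_retricted) would return True.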
Example #10
def audit_rds(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-instances", region)
    for instance in json_blob.get("DBInstances", []):
        if instance["PubliclyAccessible"]:
            findings.add(
                Finding(region, "RDS_PUBLIC_IP",
                        instance["DBInstanceIdentifier"]))
        if instance.get("DBSubnetGroup", {}).get("VpcId", "") == "":
            findings.add(
                Finding(region, "RDS_VPC_CLASSIC",
                        instance["DBInstanceIdentifier"]))
Example #11
def audit_rds(findings, region):
    json_blob = query_aws(region.account, "rds-describe-db-instances", region)
    for instance in json_blob.get('DBInstances', []):
        if instance['PubliclyAccessible']:
            findings.add(
                Finding(region, 'RDS_PUBLIC_IP',
                        instance['DBInstanceIdentifier']))
        if instance.get('DBSubnetGroup', {}).get('VpcId', '') == '':
            findings.add(
                Finding(region, 'RDS_VPC_CLASSIC',
                        instance['DBInstanceIdentifier']))
Example #12
def audit_cloudtrail(findings, region):
    json_blob = query_aws(region.account, "cloudtrail-describe-trails", region)
    if len(json_blob["trailList"]) == 0:
        findings.add(Finding(region, "CLOUDTRAIL_OFF", None, None))
    else:
        multiregion = False
        for trail in json_blob["trailList"]:
            if trail["IsMultiRegionTrail"]:
                multiregion = True
                break
        if not multiregion:
            findings.add(Finding(region, "CLOUDTRAIL_NOT_MULTIREGION", None, None))
Example #13
def audit_route53(findings, region):
    json_blob = query_aws(region.account, "route53domains-list-domains",
                          region)
    for domain in json_blob.get("Domains", []):
        if not domain["AutoRenew"]:
            findings.add(
                Finding(region, "DOMAIN_NOT_SET_TO_RENEW",
                        domain["DomainName"], None))
        if not domain["TransferLock"]:
            findings.add(
                Finding(region, "DOMAIN_HAS_NO_TRANSFER_LOCK",
                        domain["DomainName"], None))
Example #14
def audit_route53(findings, region):
    json_blob = query_aws(region.account, "route53domains-list-domains",
                          region)
    for domain in json_blob.get('Domains', []):
        if not domain['AutoRenew']:
            findings.add(
                Finding(region, 'DOMAIN_NOT_SET_TO_RENEW',
                        domain['DomainName'], None))
        if not domain['TransferLock']:
            findings.add(
                Finding(region, 'DOMAIN_HAS_NO_TRANSFER_LOCK',
                        domain['DomainName'], None))
Example #15
def audit_glacier(findings, region):
    # Check for publicly accessible vaults.
    json_blob = query_aws(region.account, "glacier-list-vaults", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for vault in json_blob.get("VaultList", []):
        name = vault["VaultName"]

        # Check policy
        policy_file_json = get_parameter_file(region, "glacier",
                                              "get-vault-access-policy", name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json["policy"]["Policy"]
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(
                Finding(region,
                        "GLACIER_PUBLIC",
                        name,
                        resource_details=policy_string))
Example #16
def audit_sns(findings, region):
    # Check for publicly accessible sns.
    json_blob = query_aws(region.account, "sns-list-topics", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for topic in json_blob.get("Topics", []):
        # Check policy
        attributes = get_parameter_file(region, "sns", "get-topic-attributes",
                                        topic["TopicArn"])
        if attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = attributes["Attributes"]
        if "Policy" in attributes:
            policy_string = attributes["Policy"]
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(
                Finding(
                    region,
                    "SNS_PUBLIC",
                    topic["TopicArn"],
                    resource_details=policy_string,
                ))
Example #17
def audit_es(findings, region):
    json_blob = query_aws(region.account, "es-list-domain-names", region)
    for domain in json_blob.get("DomainNames", []):
        name = domain["DomainName"]

        # Check policy
        policy_file_json = get_parameter_file(region, "es",
                                              "describe-elasticsearch-domain",
                                              name)
        # Find the entity we need
        policy_string = policy_file_json["DomainStatus"]["AccessPolicies"]
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)

        # ES clusters are either public, with an "Endpoint" (singular), which is bad, or
        # they are VPC-only, in which case they have an "Endpoints" (plural) map containing a "vpc" key
        if (policy_file_json["DomainStatus"].get("Endpoint", "") != ""
                or policy_file_json["DomainStatus"].get("Endpoints", {}).get(
                    "vpc", "") == ""):
            if policy.is_internet_accessible():
                findings.add(
                    Finding(region,
                            "ES_PUBLIC",
                            name,
                            resource_details=policy_string))
Example #18
def audit_sqs(findings, region):
    # Check for publicly accessible sqs.
    json_blob = query_aws(region.account, "sqs-list-queues", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for queue in json_blob.get("QueueUrls", []):
        queue_name = queue.split("/")[-1]
        # Check policy
        queue_attributes = get_parameter_file(region, "sqs",
                                              "get-queue-attributes", queue)
        if queue_attributes is None:
            # No policy
            continue

        # Find the entity we need
        attributes = queue_attributes["Attributes"]
        if "Policy" in attributes:
            policy_string = attributes["Policy"]
        else:
            # No policy set
            continue

        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(
                Finding(region,
                        "SQS_PUBLIC",
                        queue_name,
                        resource_details=policy_string))
Example #19
def audit_redshift(findings, region):
    json_blob = query_aws(region.account, "redshift-describe-clusters", region)
    for cluster in json_blob.get("Clusters", []):
        if cluster["PubliclyAccessible"]:
            findings.add(
                Finding(region, "REDSHIFT_PUBLIC_IP",
                        cluster["ClusterIdentifier"]))
Example #20
def audit_kms(findings, region):
    # Check for publicly accessible KMS keys.
    json_blob = query_aws(region.account, "kms-list-keys", region)
    if json_blob is None:
        # Service not supported in the region
        return

    for key in json_blob.get("Keys", []):
        name = key["KeyId"]

        # Check policy
        policy_file_json = get_parameter_file(region, "kms", "get-key-policy",
                                              name)
        if policy_file_json is None:
            # No policy
            continue

        # Find the entity we need
        policy_string = policy_file_json["Policy"]
        # Load the string value as json
        policy = json.loads(policy_string)
        policy = Policy(policy)
        if policy.is_internet_accessible():
            findings.add(
                Finding(region,
                        "KMS_PUBLIC",
                        name,
                        resource_details=policy_string))
Example #21
def audit_s3_block_policy(findings, region):
    caller_identity_json = query_aws(region.account, "sts-get-caller-identity",
                                     region)
    block_policy_json = get_parameter_file(region, 's3control',
                                           'get-public-access-block',
                                           caller_identity_json['Account'])
    if block_policy_json is None:
        findings.add(Finding(region, 'S3_ACCESS_BLOCK_OFF', None))
    else:
        conf = block_policy_json['PublicAccessBlockConfiguration']
        if (not conf['BlockPublicAcls'] or not conf['BlockPublicPolicy']
                or not conf['IgnorePublicAcls']
                or not conf['RestrictPublicBuckets']):
            findings.add(
                Finding(region,
                        'S3_ACCESS_BLOCK_ALL_ACCESS_TYPES',
                        None,
                        resource_details=block_policy_json))
Example #22
def audit_root_user(findings, region):
    json_blob = query_aws(region.account, "iam-get-account-summary", region)

    root_user_access_keys = json_blob.get('SummaryMap',
                                          {}).get('AccountAccessKeysPresent',
                                                  0)
    if root_user_access_keys != 0:
        findings.add(
            Finding(region,
                    'ROOT_USER_HAS_ACCESS_KEYS',
                    None,
                    resource_details={
                        'Number of access keys': root_user_access_keys
                    }))

    root_user_mfa = json_blob.get('SummaryMap', {}).get('AccountMFAEnabled', 0)
    if root_user_mfa != 1:
        findings.add(Finding(region, 'ROOT_USER_HAS_NO_MFA', None, None))
Example #23
def audit_root_user(findings, region):
    json_blob = query_aws(region.account, "iam-get-account-summary", region)

    root_user_access_keys = json_blob.get("SummaryMap", {}).get(
        "AccountAccessKeysPresent", 0
    )
    if root_user_access_keys != 0:
        findings.add(
            Finding(
                region,
                "ROOT_USER_HAS_ACCESS_KEYS",
                None,
                resource_details={"Number of access keys": root_user_access_keys},
            )
        )

    root_user_mfa = json_blob.get("SummaryMap", {}).get("AccountMFAEnabled", 0)
    if root_user_mfa != 1:
        findings.add(Finding(region, "ROOT_USER_HAS_NO_MFA", None, None))
Example #24
def audit_ebs_snapshots(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-snapshots", region)
    for snapshot in json_blob["Snapshots"]:
        try:
            file_json = get_parameter_file(region, "ec2",
                                           "describe-snapshot-attribute",
                                           snapshot["SnapshotId"])
            if file_json is None:
                # Not technically an exception, but an unexpected situation
                findings.add(
                    Finding(
                        region,
                        "EXCEPTION",
                        snapshot,
                        resource_details={
                            "location": "EBS snapshot has no attributes"
                        },
                    ))
                continue
            for attribute in file_json["CreateVolumePermissions"]:
                if attribute.get("Group", "self") != "self":
                    findings.add(
                        Finding(
                            region,
                            "EBS_SNAPSHOT_PUBLIC",
                            snapshot,
                            resource_details={
                                "Entities allowed to restore":
                                attribute["Group"]
                            },
                        ))
        except OSError:
            findings.add(
                Finding(
                    region,
                    "EXCEPTION",
                    None,
                    resource_details={
                        "location": "Could not open EBS snapshot file",
                        "file_name": file_name,
                    },
                ))
Example #25
def audit_s3_block_policy(findings, region):
    caller_identity_json = query_aws(region.account, "sts-get-caller-identity",
                                     region)
    block_policy_json = get_parameter_file(region, "s3control",
                                           "get-public-access-block",
                                           caller_identity_json["Account"])
    if block_policy_json is None:
        findings.add(Finding(region, "S3_ACCESS_BLOCK_OFF", None))
    else:
        conf = block_policy_json["PublicAccessBlockConfiguration"]
        if (not conf["BlockPublicAcls"] or not conf["BlockPublicPolicy"]
                or not conf["IgnorePublicAcls"]
                or not conf["RestrictPublicBuckets"]):
            findings.add(
                Finding(
                    region,
                    "S3_ACCESS_BLOCK_ALL_ACCESS_TYPES",
                    None,
                    resource_details=block_policy_json,
                ))
Example #26
def check_for_bad_policy(findings, region, arn, policy_text):
    for statement in make_list(policy_text["Statement"]):
        # Checking for signatures of the bad MFA policy from
        # https://web.archive.org/web/20170602002425/https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_users-self-manage-mfa-and-creds.html
        # and
        # https://github.com/awsdocs/iam-user-guide/blob/cfe14c674c494d07ba0ab952fe546fdd587da65d/doc_source/id_credentials_mfa_enable_virtual.md#permissions-required
        if (
            statement.get("Sid", "") == "AllowIndividualUserToManageTheirOwnMFA"
            or statement.get("Sid", "")
            == "AllowIndividualUserToViewAndManageTheirOwnMFA"
        ):
            if "iam:DeactivateMFADevice" in make_list(statement.get("Action", [])):
                findings.add(Finding(region, "IAM_BAD_MFA_POLICY", arn, policy_text))
                return
        elif (
            statement.get("Sid", "")
            == "BlockAnyAccessOtherThanAboveUnlessSignedInWithMFA"
        ):
            if "iam:*" in make_list(statement.get("NotAction", [])):
                findings.add(Finding(region, "IAM_BAD_MFA_POLICY", arn, policy_text))
                return
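
As a concrete illustration (hypothetical, not taken from the tool), a policy document matching the first signature checked above would look roughly like this:

# Hypothetical policy document matching the bad-MFA signature: the
# "manage your own MFA" Sid grants iam:DeactivateMFADevice, so stolen
# long-lived access keys could be used to remove the user's MFA device.
bad_mfa_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowIndividualUserToManageTheirOwnMFA",
            "Effect": "Allow",
            "Action": [
                "iam:CreateVirtualMFADevice",
                "iam:DeactivateMFADevice",
                "iam:EnableMFADevice",
                "iam:ResyncMFADevice",
            ],
            "Resource": "arn:aws:iam::*:mfa/${aws:username}",
        }
    ],
}
# check_for_bad_policy(findings, region, arn, bad_mfa_policy) would add an
# IAM_BAD_MFA_POLICY finding (findings, region, and arn come from the caller).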
Example #27
def audit_accessanalyzer(findings, region):
    analyzer_list_json = query_aws(region.account,
                                   "accessanalyzer-list-analyzers", region)
    if not analyzer_list_json:
        # Access Analyzer must not exist in this region (or the collected data is old)
        return
    is_enabled = False
    for analyzer in analyzer_list_json["analyzers"]:
        if analyzer["status"] == "ACTIVE":
            is_enabled = True
    if not is_enabled:
        findings.add(Finding(region, "ACCESSANALYZER_OFF", None, None))
Example #28
def audit_password_policy(findings, region):
    json_blob = query_aws(region.account, "iam-get-account-password-policy",
                          region)
    if json_blob is None or json_blob.get("PasswordPolicy", {}) == {}:
        findings.add(Finding(region, "PASSWORD_POLICY_NOT_SET", None, None))
    else:
        if json_blob["PasswordPolicy"].get("MinimumPasswordLength", 0) < 12:
            findings.add(
                Finding(
                    region,
                    "PASSWORD_POLICY_CHARACTER_MINIMUM",
                    None,
                    resource_details={
                        "MinimumPasswordLength":
                        json_blob["PasswordPolicy"].get(
                            "MinimumPasswordLength", 0)
                    },
                ))

        lacking_character_requirements = []
        if not json_blob["PasswordPolicy"].get("RequireNumbers", False):
            lacking_character_requirements.append("RequireNumbers")
        if not json_blob["PasswordPolicy"].get("RequireSymbols", False):
            lacking_character_requirements.append("RequireSymbols")
        if not json_blob["PasswordPolicy"].get("RequireLowercaseCharacters",
                                               False):
            lacking_character_requirements.append("RequireLowercaseCharacters")
        if not json_blob["PasswordPolicy"].get("RequireUppercaseCharacters",
                                               False):
            lacking_character_requirements.append("RequireUppercaseCharacters")
        if len(lacking_character_requirements) > 0:
            findings.add(
                Finding(
                    region,
                    "PASSWORD_POLICY_CHARACTER_SET_REQUIREMENTS",
                    None,
                    resource_details={
                        "Policy lacks": lacking_character_requirements
                    },
                ))
Example #29
def audit_guardduty(findings, region):
    detector_list_json = query_aws(region.account, "guardduty-list-detectors",
                                   region)
    if not detector_list_json:
        # GuardDuty must not exist in this region (or the collected data is old)
        return
    is_enabled = False
    for detector in detector_list_json["DetectorIds"]:
        detector_json = get_parameter_file(region, "guardduty", "get-detector",
                                           detector)
        if detector_json["Status"] == "ENABLED":
            is_enabled = True
    if not is_enabled:
        findings.add(Finding(region, "GUARDDUTY_OFF", None, None))
Example #30
def audit_ebs_snapshots(findings, region):
    json_blob = query_aws(region.account, "ec2-describe-snapshots", region)
    for snapshot in json_blob['Snapshots']:
        try:
            file_json = get_parameter_file(region, 'ec2',
                                           'describe-snapshot-attribute',
                                           snapshot['SnapshotId'])
            if file_json is None:
                # Not technically an exception, but an unexpected situation
                findings.add(
                    Finding(region,
                            'EXCEPTION',
                            snapshot,
                            resource_details={
                                'location': 'EBS snapshot has no attributes'
                            }))
                continue
            for attribute in file_json['CreateVolumePermissions']:
                if attribute.get('Group', 'self') != 'self':
                    findings.add(
                        Finding(region,
                                'EBS_SNAPSHOT_PUBLIC',
                                snapshot,
                                resource_details={
                                    'Entities allowed to restore':
                                    attribute['Group']
                                }))
        except OSError:
            findings.add(
                Finding(region,
                        'EXCEPTION',
                        None,
                        resource_details={
                            'location': 'Could not open EBS snapshot file',
                            'file_name': snapshot['SnapshotId']
                        }))