def run(self, context):
    """Flag CloudTrail trails that have log file validation disabled.

    Returns one LOW severity Finding listing the affected trails (as
    "<home-region>:trail/<name>"), or [] when all trails validate logs.
    """
    seen_arns = []
    log_validation_disabled = []
    for region in context.regions:
        region_client = client(context, region_name=region)
        for trail in region_client.describe_trails()["trailList"]:
            # Multi-region trails are returned in every region; only
            # inspect each trail once.
            if trail["TrailARN"] in seen_arns:
                continue
            seen_arns.append(trail["TrailARN"])
            friendly_name = f'{trail["HomeRegion"]}:trail/{trail["Name"]}'
            if not trail["LogFileValidationEnabled"]:
                log_validation_disabled.append(friendly_name)
    # (Removed unused `trailcount` counter from the original.)
    if log_validation_disabled:
        return [
            Finding(
                context.state,
                "CloudTrail log validation disabled",
                "LOW",
                trails=log_validation_disabled,
            )
        ]
    return []
def run(self, context):
    """Report EBS volumes that are not encrypted at rest.

    Returns one MEDIUM Finding with totals and a region -> volume-id
    mapping, or [] when every volume is encrypted.
    """
    total_volumes = 0
    unencrypted_volumes = 0
    # Maps region name -> list of unencrypted volume ids.
    unencrypted = {}
    for region in context.regions:
        region_client = client(context, region_name=region)
        pages = region_client.get_paginator("describe_volumes").paginate()
        for page in pages:
            for volume in page["Volumes"]:
                total_volumes += 1
                if volume["Encrypted"]:
                    continue
                unencrypted_volumes += 1
                unencrypted.setdefault(region, []).append(volume["VolumeId"])
    if not unencrypted_volumes:
        return []
    return [
        Finding(
            context.state,
            "EBS block storage volumes without encryption",
            "MEDIUM",
            count_total=total_volumes,
            count_unenc=unencrypted_volumes,
            unenc_volumes=unencrypted,
        )
    ]
def run(self, context):
    """Flag EKS clusters whose API server endpoint is publicly reachable.

    Returns one MEDIUM Finding with cluster counts and a region ->
    [{name, endpoint}] mapping, or [] when no cluster is public.
    """
    cluster_count = 0
    public_count = 0
    public = {}  # region -> [{"name": ..., "endpoint": ...}]
    for region in context.regions:
        eks = client(context, region_name=region)
        for cluster_name in list_clusters(context, region):
            cluster_count += 1
            description = eks.describe_cluster(name=cluster_name)["cluster"]
            if not description["resourcesVpcConfig"]["endpointPublicAccess"]:
                continue
            public_count += 1
            public.setdefault(region, []).append(
                {"name": cluster_name, "endpoint": description["endpoint"]}
            )
    if not public_count:
        return []
    return [
        Finding(
            context.state,
            "EKS API server endpoints are publicly accessible",
            "MEDIUM",
            cluster_count=cluster_count,
            public_count=public_count,
            instances=public,
        )
    ]
def run(self, context):
    """Report AMIs owned by this account that are marked public.

    Returns one MEDIUM Finding with image counts and a region ->
    [image ids] mapping, or [] when no owned image is public.
    """
    total_images = 0
    public_images = 0
    images = {}  # region -> [public image ids]
    for region in context.regions:
        region_client = client(context, region_name=region)
        owned = region_client.describe_images(Owners=["self"])["Images"]
        for image in owned:
            total_images += 1
            if not image["Public"]:
                continue
            public_images += 1
            images.setdefault(region, []).append(image["ImageId"])
    if not public_images:
        return []
    return [
        Finding(
            context.state,
            "Public Amazon Machine Images",
            "MEDIUM",
            count_total=total_images,
            count_public=public_images,
            public_images=images,
        )
    ]
def run(self, context):
    """Flag S3 buckets with no default server-side encryption configured.

    Returns one MEDIUM Finding listing the buckets, or [] when every
    bucket has an encryption configuration.
    """
    s3 = client(context)
    without = []
    for bucket in get_all_buckets(context):
        try:
            # Most operations are region independent, but the encryption
            # configuration has to be requested from the region the
            # bucket resides in.
            location = s3.get_bucket_location(
                Bucket=bucket)["LocationConstraint"]
            # Bug fix: the original rebound `s3` itself, so a regional
            # client chosen for one bucket leaked into later iterations
            # (a later bucket with LocationConstraint None would be
            # queried through the wrong region's client). Use a
            # per-bucket client instead.
            bucket_client = (
                client(context, region_name=location) if location else s3
            )
            # Raises ClientError when no encryption config is defined.
            bucket_client.get_bucket_encryption(Bucket=bucket)
        except ClientError as err:
            # A bucket with no encryption settings raises a specific
            # error code, which is common; anything else is unexpected.
            if err.response["Error"]["Code"] != ENC_NOT_FOUND:
                raise
            without.append(bucket)
    if without:
        return [
            Finding(
                context.state,
                "S3 buckets without encryption",
                "MEDIUM",
                buckets=without,
            )
        ]
    return []
def run(self, context):
    """Find running EC2 instances with a public IP whose attached security
    groups allow TCP/UDP ingress from anywhere (0.0.0.0/0 or ::/0).

    Returns one INFO Finding with the matching (region, group, instance,
    public ip, port) entries, or [] when nothing matches.
    """
    results = []
    for region in context.regions:
        print(f"Parsing region {region}")
        try:
            region_secgroup_ipperms_map = {}
            for g in get_region_secgroups(context, region):
                # Create a lookup map between groups and corresponding rules
                region_secgroup_ipperms_map[
                    g["GroupId"]] = g["IpPermissions"]
            for instance in get_region_running_instances(
                    context, region):
                # We only care about EC2 instances with public IPs and attached security groups
                if (instance.public_ip_address is not None
                        and instance.security_groups is not None):
                    # Look up instance's rules by finding the security group in the lookup map
                    for instance_group in instance.security_groups:
                        if (instance_group["GroupId"]
                                in region_secgroup_ipperms_map.keys()):
                            for rule in region_secgroup_ipperms_map[
                                    instance_group["GroupId"]]:
                                for iprange in rule["IpRanges"]:
                                    # Rule is world-open (v4 or v6) AND a
                                    # port-bearing protocol (tcp/udp).
                                    if (iprange.get("CidrIpv6", "") == "::/0"
                                            or iprange.get(
                                                "CidrIp", "") == "0.0.0.0/0"
                                            ) and rule["IpProtocol"] in (
                                                "tcp", "udp"):
                                        entry = {
                                            "Region": region,
                                            "GroupId": instance_group["GroupId"],
                                            "InstanceId": instance.instance_id,
                                            "PublicIpAddress": instance.public_ip_address,
                                            "ToPort": rule["ToPort"],
                                        }
                                        # De-duplicate identical entries.
                                        if entry not in results:
                                            results.append(entry)
        except Exception as e:
            # NOTE(review): broad best-effort catch keeps the scan going
            # across regions, but only prints the error.
            print(e)
    if len(results) > 0:
        return [
            Finding(
                context.state,
                "Instances with open security groups",
                "INFO",
                results=results,
            )
        ]
    return []
def run(self, context):
    """Report classic and v2 ELBs that have access logging disabled.

    Returns one LOW Finding with totals and a region -> [names] mapping,
    or [] when every load balancer logs access.
    """
    accesslogs_disabled = {}  # region -> [load balancer names]
    elb_count = 0
    disabled_count = 0
    for region in context.regions:
        # Classic load balancers: attributes come back as a nested dict.
        elb = client(context, region_name=region)
        for page in elb.get_paginator("describe_load_balancers").paginate():
            for lb in page["LoadBalancerDescriptions"]:
                elb_count += 1
                name = lb["LoadBalancerName"]
                attribs = elb.describe_load_balancer_attributes(
                    LoadBalancerName=name
                )["LoadBalancerAttributes"]
                if not attribs["AccessLog"]["Enabled"]:
                    disabled_count += 1
                    accesslogs_disabled.setdefault(region, []).append(name)
        # v2 load balancers expose the same setting as a key/value list.
        elbv2 = v2_client(context, region_name=region)
        for page in elbv2.get_paginator("describe_load_balancers").paginate():
            for lb in page["LoadBalancers"]:
                elb_count += 1
                name = lb["LoadBalancerName"]
                attribs = elbv2.describe_load_balancer_attributes(
                    LoadBalancerArn=lb["LoadBalancerArn"]
                )
                for attrib in attribs["Attributes"]:
                    if (attrib["Key"] == "access_logs.s3.enabled"
                            and attrib["Value"] == "false"):
                        disabled_count += 1
                        accesslogs_disabled.setdefault(region, []).append(name)
    if not disabled_count:
        return []
    return [
        Finding(
            context.state,
            "ELB Instances with Access Logs disabled",
            "LOW",
            elb_count=elb_count,
            disabled_count=disabled_count,
            elbs=accesslogs_disabled,
        )
    ]
def run(self, context):
    """Flag regions where CloudTrail reports no trails at all.

    Returns one MEDIUM Finding listing those regions, or [] when every
    region has at least one trail.
    """
    regions_without_trails = [
        region
        for region in context.regions
        if not client(
            context, region_name=region
        ).describe_trails()["trailList"]
    ]
    if not regions_without_trails:
        return []
    return [
        Finding(
            context.state,
            "CloudTrail not in use",
            "MEDIUM",
            regions=regions_without_trails,
        )
    ]
def run(self, context):
    """Report v2 load balancers with deletion protection turned off.

    Returns one LOW Finding with totals and a region -> [names] mapping,
    or [] when all load balancers have deletion protection on.
    """
    dltpt_disabled = {}  # region -> [load balancer names]
    elb_count = 0
    disabled_count = 0
    for region in context.regions:
        elbv2 = v2_client(context, region_name=region)
        paginator = elbv2.get_paginator("describe_load_balancers")
        for page in paginator.paginate():
            for lb in page["LoadBalancers"]:
                elb_count += 1
                attribs = elbv2.describe_load_balancer_attributes(
                    LoadBalancerArn=lb["LoadBalancerArn"]
                )
                for attrib in attribs["Attributes"]:
                    if (attrib["Key"] == "deletion_protection.enabled"
                            and attrib["Value"] == "false"):
                        disabled_count += 1
                        dltpt_disabled.setdefault(region, []).append(
                            lb["LoadBalancerName"]
                        )
    if not disabled_count:
        return []
    return [
        Finding(
            context.state,
            "ELBv2 Instances with Delete Protection disabled",
            "LOW",
            elb_count=elb_count,
            disabled_count=disabled_count,
            elbs=dltpt_disabled,
        )
    ]
def run(self, context):
    """Flag RDS instances based on the property named by ``self.name``.

    For instances that belong to a cluster, the cluster-level value of
    the property takes precedence over the instance-level value.
    Returns one LOW Finding (title from ``self.title``) or [].
    """
    findings = []
    rds_count = 0
    flagged_rds_count = 0
    flagged = {}  # region -> [db instance identifiers]
    for region in context.regions:
        rds = client(context, region_name=region)
        for db in describe_db_instances(context, region):
            rds_count += 1
            target_value = db[self.name]
            # Cluster properties take precedence.
            if "DBClusterIdentifier" in db:
                cluster = describe_db_cluster(
                    context, region, db["DBClusterIdentifier"]
                )
                if self.name in cluster:
                    target_value = cluster[self.name]
            # NOTE(review): this comparison is always true unless a
            # cluster override changed target_value, so it flags every
            # non-overridden instance. Verify whether the intent was to
            # compare target_value against a specific flag value instead.
            if db[self.name] == target_value:
                flagged_rds_count += 1
                if region not in flagged:
                    flagged[region] = []
                flagged[region].append(db["DBInstanceIdentifier"])
    if flagged_rds_count:
        findings.append(
            Finding(
                context.state,
                self.title,
                "LOW",
                rds_count=rds_count,
                flagged_rds_count=flagged_rds_count,
                instances=flagged,
            )
        )
    return findings
def run(self, context):
    """Flag Redshift clusters that are publicly accessible.

    Severity escalates with exposure: LOW for a public cluster with no
    matching ingress, MEDIUM when an AWS-owned CIDR can reach the
    cluster port, HIGH when 0.0.0.0/0 can.
    """
    findings = []
    cluster_count = 0
    public_count = 0
    public = {}  # region -> [{id, port, security_groups}]
    # First find any cluster that is `PubliclyAccessible`.
    for region in context.regions:
        redshift = client(context, region_name=region)
        for page in redshift.get_paginator("describe_clusters").paginate():
            for cluster in page["Clusters"]:
                cluster_count += 1
                if cluster["PubliclyAccessible"]:
                    public_count += 1
                    if region not in public:
                        public[region] = []
                    security_groups = {
                        group["VpcSecurityGroupId"]: []
                        for group in cluster["VpcSecurityGroups"]
                    }
                    public[region].append(
                        {
                            "id": cluster["ClusterIdentifier"],
                            "port": cluster["Endpoint"]["Port"],
                            "security_groups": security_groups,
                        }
                    )
    # Next see if there are security groups in place for the found clusters.
    severity = "LOW"
    for region in public:
        for cluster in public[region]:
            for group in get_region_secgroups(context, region):
                group_id = group["GroupId"]
                if group_id not in cluster["security_groups"]:
                    continue
                for permission in group["IpPermissions"]:
                    if permission["IpRanges"] == []:
                        continue
                    if permission["FromPort"] != cluster["port"]:
                        continue
                    for ip in permission["IpRanges"]:
                        cidr_ip = ip["CidrIp"]
                        if cidr_ip == "0.0.0.0/0":
                            # Open to the whole internet.
                            severity = "HIGH"
                            cluster["security_groups"][group_id].append(
                                {"source": cidr_ip, "class": "Internet"}
                            )
                        elif context.is_aws_cidr(cidr_ip):
                            # Bug fix: only escalate, never downgrade — the
                            # original unconditionally set MEDIUM here,
                            # overwriting a HIGH set by an earlier rule.
                            if severity != "HIGH":
                                severity = "MEDIUM"
                            cluster["security_groups"][group_id].append(
                                {"source": cidr_ip, "class": "AWS"}
                            )
    if public_count:
        findings.append(
            Finding(
                context.state,
                "Redshift clusters are publicly accessible",
                severity,
                cluster_count=cluster_count,
                public_count=public_count,
                instances=public,
            )
        )
    return findings
def run(self, context):
    """Flag Redshift parameter groups whose ``self.name`` parameter has
    the value ``self.value``.

    Severity is INFO unless a flagged group is attached to a cluster
    (then MEDIUM). An unused default parameter group is unflagged, since
    it cannot be changed and is harmless while unused.
    """
    findings = []
    parameter_group_count = 0
    flagged_parameter_group_count = 0
    flagged = {}  # region -> [flagged parameter-group records]
    # Search for parameter groups with the properties we are looking to flag.
    for region in context.regions:
        redshift = client(context, region_name=region)
        for page in redshift.get_paginator(
            "describe_cluster_parameter_groups"
        ).paginate():
            for parameter_group in page["ParameterGroups"]:
                parameter_group_count += 1
                group_name = parameter_group["ParameterGroupName"]
                for parameter in redshift.describe_cluster_parameters(
                    ParameterGroupName=group_name
                )["Parameters"]:
                    if (
                        parameter["ParameterName"] == self.name
                        and parameter["ParameterValue"] == self.value
                    ):
                        flagged_parameter_group_count += 1
                        if region not in flagged:
                            flagged[region] = []
                        flagged[region].append(
                            {
                                "group_name": group_name,
                                "parameter_name": self.name,
                                "parameter_value": self.value,
                                "in_use": False,
                            }
                        )
    # Next see if those parameter groups are actually used.
    severity = "INFO"
    for region in context.regions:
        redshift = client(context, region_name=region)
        for page in redshift.get_paginator("describe_clusters").paginate():
            for cluster in page["Clusters"]:
                for parameter_group in cluster["ClusterParameterGroups"]:
                    group_name = parameter_group["ParameterGroupName"]
                    # Bug fix: regions with no flagged groups are absent
                    # from `flagged`, so indexing `flagged[region]`
                    # directly raised KeyError here.
                    for other_group in flagged.get(region, []):
                        if other_group["group_name"] == group_name:
                            other_group["in_use"] = True
                            severity = "MEDIUM"
    # If the default parameter group isn't used, then unflag it.
    for region in flagged:
        default_group = None
        for group in flagged[region]:
            if (
                group["group_name"] == "default.redshift-1.0"
                and not group["in_use"]
            ):
                default_group = group
                break
        if default_group:
            flagged_parameter_group_count -= 1
            flagged[region].remove(default_group)
    if flagged_parameter_group_count:
        findings.append(
            Finding(
                context.state,
                self.title,
                severity,
                parameter_group_count=parameter_group_count,
                flagged_parameter_group_count=flagged_parameter_group_count,
                instances=flagged,
            )
        )
    return findings
def run(self, context):
    """Report security groups with rules open to all IPs, split by
    whether the group is actually attached to a network interface.

    Returns a MEDIUM Finding when any in-use group is world-open, an
    INFO Finding when only unused groups are, and [] otherwise.
    """
    used_security_groups = {}  # gid -> [attachment info dicts]
    open_to_all = {}           # region -> {proto: {port: [gids]}} (in use)
    open_but_unused = {}       # region -> {proto: {port: [gids]}} (unused)
    used_open_gids = []
    # Collect list of security groups attached to a network interface
    for region in context.regions:
        try:
            for instance in get_region_instances(context, region):
                for interface in instance["NetworkInterfaces"]:
                    for group in interface["Groups"]:
                        gid = group["GroupId"]
                        info = {
                            "instance": instance["InstanceId"],
                            "interface": interface["NetworkInterfaceId"],
                            "name": group["GroupName"],
                            "ip": {
                                "private": interface["PrivateIpAddress"],
                                "public": None,
                            },
                        }
                        # Only interfaces with an Association have a
                        # public IP.
                        if "Association" in interface:
                            info["ip"]["public"] = interface[
                                "Association"]["PublicIp"]
                        if gid not in used_security_groups:
                            used_security_groups[gid] = [info]
                        else:
                            used_security_groups[gid].append(info)
        except Exception as e:
            # NOTE(review): best-effort — keep scanning other regions.
            print(e)
    # Collect list of problematic security groups
    for region in context.regions:
        try:
            for group in get_region_secgroups(context, region):
                # Ingress rules
                for permission in group["IpPermissions"]:
                    if permission["IpRanges"] == []:
                        # Empty Security Group
                        continue
                    # Test for ports allowed from any IP
                    if any(
                            iprange.get("CidrIpv6", "") == "::/0"
                            or iprange.get("CidrIp", "") == "0.0.0.0/0"
                            for iprange in permission["IpRanges"]):
                        proto = permission.get("IpProtocol", "-1")
                        toport = permission.get("ToPort", -1)
                        fromport = permission.get("FromPort", -1)
                        # Single port or a "from-to" range label.
                        if toport == fromport:
                            port = toport
                        else:
                            port = f"{fromport}-{toport}"
                        gid = group["GroupId"]
                        # Sort into in-use vs unused buckets, building
                        # the region -> proto -> port -> [gid] nesting.
                        if gid in used_security_groups:
                            used_open_gids.append(gid)
                            if region not in open_to_all:
                                open_to_all[region] = {
                                    proto: {
                                        port: [gid]
                                    }
                                }
                            elif proto not in open_to_all[region]:
                                open_to_all[region][proto] = {port: [gid]}
                            elif port not in open_to_all[region][proto]:
                                open_to_all[region][proto][port] = [gid]
                            else:
                                open_to_all[region][proto][port].append(
                                    gid)
                        else:
                            if region not in open_but_unused:
                                open_but_unused[region] = {
                                    proto: {
                                        port: [gid]
                                    }
                                }
                            elif proto not in open_but_unused[region]:
                                open_but_unused[region][proto] = {
                                    port: [gid]
                                }
                            elif port not in open_but_unused[region][
                                    proto]:
                                open_but_unused[region][proto][port] = [
                                    gid
                                ]
                            else:
                                open_but_unused[region][proto][
                                    port].append(gid)
        except Exception as e:
            # NOTE(review): best-effort — keep scanning other regions.
            print(e)
    # Categorize unused security groups for findings
    flagged_groups = {}
    for group in used_security_groups:
        if group in used_open_gids:
            flagged_groups[group] = used_security_groups[group]
    if len(open_to_all.keys()):
        return [
            Finding(
                context.state,
                "Security Groups with ports open to all IPs",
                "MEDIUM",
                open_all=open_to_all,
                open_unused=open_but_unused,
                used=flagged_groups,
            )
        ]
    elif len(open_but_unused.keys()):
        return [
            Finding(
                context.state,
                "Unused Security Groups with ports open to all IPs",
                "INFO",
                open_all=open_to_all,
                open_unused=open_but_unused,
                used=flagged_groups,
            )
        ]
    return []
def run(self, context):
    """Scan bucket ACLs for grants to the public group.

    Buckets are sorted into world-writable, world-readable, and
    "mitigated" (a public write grant neutralized by an account- or
    bucket-level public access block). Returns one Finding (HIGH when
    any bucket is world writable, else INFO) or [].
    """
    buckets = get_all_buckets(context)
    findings = []
    s3 = client(context)
    # No buckets? No problem!
    if not buckets:
        return findings
    # Get account ID to get account level S3 public access block data
    # TODO: This means of getting the account id works but is weak, and
    # likely breaks if ran cross-organizationally
    try:
        acct_pub_access = get_account_public_access(context)
        # This will be true if public access is blocked for the account
        acct_pub_blocked = (acct_pub_access["IgnorePublicAcls"]
                            and acct_pub_access["RestrictPublicBuckets"])
    # Bug fix: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit. TODO: narrow to botocore ClientError.
    except Exception:
        # No public access block on the account level defined
        acct_pub_blocked = False
    # Check each bucket
    acls_readonly = []
    acls_write = []
    acls_mitigated = []
    for bucket in buckets:
        # Check if a public access block is set for the bucket
        try:
            pub_block = s3.get_public_access_block(Bucket=bucket)
            pub_access = pub_block["PublicAccessBlockConfiguration"]
            # True if public access is blocked on bucket level
            pub_blocked = (pub_access["IgnorePublicAcls"]
                           and pub_access["RestrictPublicBuckets"])
        # Bug fix: was a bare `except:` (see above).
        except Exception:
            # No bucket level public access block defined
            pub_blocked = False
        # Scan for dangerous grants in ACLs
        acl = s3.get_bucket_acl(Bucket=bucket)
        for grant in acl["Grants"]:
            grantee, permission = grant["Grantee"], grant["Permission"]
            read = False
            write = False
            if grantee["Type"] == "Group" and grantee["URI"] == PUBLIC_URI:
                if permission == "FULL_CONTROL":
                    read = True
                    write = True
                elif permission in ["WRITE", "WRITE_ACP"]:
                    write = True
                elif permission == "READ":
                    read = True
            # Bug fix: the original `write and acct_pub_blocked or
            # pub_blocked` parsed as `(write and acct_pub_blocked) or
            # pub_blocked`, so EVERY grant — even non-public ones such
            # as the owner's — on a bucket with a public access block
            # landed in acls_mitigated.
            if write and (acct_pub_blocked or pub_blocked):
                # A public access block mitigates this acl grant
                acls_mitigated.append(bucket)
            elif write:
                # WE FLIP TABLE
                acls_write.append(bucket)
            elif read:
                acls_readonly.append(bucket)
    if acls_readonly or acls_write or acls_mitigated:
        sev = "INFO"
        title = "S3 Buckets with public access"
        if acls_write:
            sev = "HIGH"
            title = "World writable S3 Buckets"
        findings.append(
            Finding(
                context.state,
                title,
                sev,
                acls_write=acls_write,
                acls_mitigated=acls_mitigated,
                acls_readonly=acls_readonly,
            ))
    # TODO bucket policies
    # TODO object level permissions
    return findings