def get_resources(self) -> List[Resource]:
    """Collect route tables attached to the VPC and their relationships."""
    ec2_client = self.vpc_options.client("ec2")
    found = []
    vpc_filter = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    tables = ec2_client.describe_route_tables(Filters=vpc_filter)

    if self.vpc_options.verbose:
        message_handler("Collecting data from Route Tables...", "HEADER")

    # Iterate over every route table returned by the VPC filter.
    for table in tables["RouteTables"]:
        tag_name = get_name_tag(table)
        table_name = table["RouteTableId"] if tag_name is None else tag_name
        table_digest = ResourceDigest(
            id=table["RouteTableId"], type="aws_route_table"
        )

        # A "main" table is implicitly associated with the VPC itself.
        is_main = any(
            association["Main"] is True for association in table["Associations"]
        )
        if is_main:
            self.relations_found.append(
                ResourceEdge(
                    from_node=table_digest,
                    to_node=self.vpc_options.vpc_digest(),
                )
            )
        else:
            # Explicit associations link the table to individual subnets.
            for association in table["Associations"]:
                if "SubnetId" in association:
                    self.relations_found.append(
                        ResourceEdge(
                            from_node=table_digest,
                            to_node=ResourceDigest(
                                id=association["SubnetId"], type="aws_subnet"
                            ),
                        )
                    )

        # Public when a default route (0.0.0.0/0) targets an internet gateway.
        is_public = any(
            route.get("DestinationCidrBlock") == "0.0.0.0/0"
            and route.get("GatewayId", "").startswith("igw-")
            for route in table["Routes"]
        )

        found.append(
            Resource(
                digest=table_digest,
                name=table_name,
                details="default: {}, public: {}".format(is_main, is_public),
                group="network",
                tags=resource_tags(table),
            )
        )
    return found
def analyze_service(self, service_name, client_quota, threshold_requested):
    """Analyze the quotas cached for one AWS service.

    Returns a list of limit resources whose usage crosses the requested
    threshold; returns an empty list for special or uncached services.

    Fix: the cache lookup may return None on a miss, and `service_name
    not in None` raises TypeError — guard against it explicitly.
    """
    if service_name in SPECIAL_RESOURCES:
        return []

    cache_key = "aws_limits_" + service_name + "_" + self.options.region_name
    cache = self.cache.get_key(cache_key)
    resources_found = []
    # Guard both a missing cache entry (None) and a service that was
    # never cached under this key.
    if cache is None or service_name not in cache:
        return []

    # Services that must be enabled in your account; API calls fail otherwise.
    # Fraud Detector: https://pages.awscloud.com/amazon-fraud-detector-preview.html#
    # AWS Organizations: https://console.aws.amazon.com/organizations/
    if service_name in ("frauddetector", "organizations"):
        message_handler(
            "Attention: Service "
            + service_name
            + " must be enabled to use API calls.",
            "WARNING",
        )

    for data_quota_code in cache[service_name]:
        if data_quota_code is None:
            continue
        resource_found = self.analyze_quota(
            client_quota=client_quota,
            data_quota_code=data_quota_code,
            service=service_name,
            threshold_requested=threshold_requested,
        )
        if resource_found is not None:
            resources_found.append(resource_found)
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect IAM instance profiles and relate each to its single role."""
    if self.vpc_options.verbose:
        message_handler("Collecting data from Instance Profiles...", "HEADER")

    iam_client = self.vpc_options.client("iam")
    pages = iam_client.get_paginator("list_instance_profiles").paginate()

    found = []
    edges = []
    for page in pages:
        for profile in page["InstanceProfiles"]:
            digest = ResourceDigest(
                id=profile["InstanceProfileName"],
                type="aws_iam_instance_profile",
            )
            found.append(
                Resource(
                    digest=digest,
                    name=profile["InstanceProfileName"],
                    details="",
                    group="",
                )
            )
            # Only a profile with exactly one role produces an edge.
            if len(profile["Roles"]) == 1:
                edges.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=profile["Roles"][0]["RoleName"],
                            type="aws_iam_role",
                        ),
                    )
                )
    self.relations_found = edges
    return found
def wrapper(*args, **kwargs):
    """Run the wrapped call, translating known AWS errors into warnings.

    NOTE(review): positional layout is assumed from the call sites —
    args[0] is the instance (has .options), args[2] the operation name,
    args[1] the resource type, args[5] the service name; confirm against
    AllResources.analyze_operation.
    """
    try:
        return func(*args, **kwargs)
    # pylint: disable=broad-except
    except Exception as e:
        # Only the resource-scan entry point gets the friendly per-service
        # handling; everything else is logged as critical below.
        if func.__qualname__ == "AllResources.analyze_operation":
            # In quiet mode, swallow the error silently (returns None).
            if not args[0].options.verbose:
                return
            exception_str = str(e)
            if ("is not subscribed to AWS Security Hub" in exception_str
                    or "not enabled for securityhub" in exception_str
                    or "The subscription does not exist" in exception_str
                    or "calling the DescribeHub operation" in exception_str):
                message_handler(
                    "Operation {} not accessible, AWS Security Hub is not configured... Skipping"
                    .format(args[2]),
                    "WARNING",
                )
            elif ("not connect to the endpoint URL" in exception_str
                    or "not available in this region" in exception_str
                    or "API is not available" in exception_str):
                message_handler(
                    "Service {} not available in the selected region... Skipping"
                    .format(args[5]),
                    "WARNING",
                )
            elif ("Your account is not a member of an organization" in exception_str
                    or "This action can only be made by accounts in an AWS Organization" in exception_str
                    or "The request failed because organization is not in use" in exception_str):
                message_handler(
                    "Service {} only available to account in an AWS Organization... Skipping"
                    .format(args[5]),
                    "WARNING",
                )
            elif "is no longer available to new customers" in exception_str:
                message_handler(
                    "Service {} is no longer available to new customers... Skipping"
                    .format(args[5]),
                    "WARNING",
                )
            elif ("only available to Master account in AWS FM" in exception_str
                    or "not currently delegated by AWS FM" in exception_str):
                message_handler(
                    "Operation {} not accessible, not master account in AWS FM... Skipping"
                    .format(args[2]),
                    "WARNING",
                )
            else:
                # Unrecognized failure: record it, do not re-raise.
                log_critical(
                    "\nError running operation {}, type {}. Error message {}"
                    .format(args[2], args[1], exception_str))
        else:
            log_critical(
                "\nError running method {}. Error message {}".format(
                    func.__qualname__, str(e)))
def get_resources(self) -> List[Resource]:
    """Scan every available (or user-selected) AWS service in parallel."""
    boto_loader = Loader()
    if self.options.services:
        service_names = self.options.services
    else:
        service_names = boto_loader.list_available_services(
            type_name="service-2"
        )

    collected = []
    allowed_actions = self.get_policies_allowed_actions()

    if self.options.verbose:
        message_handler(
            "Analyzing listing operations across {} service...".format(
                len(service_names)
            ),
            "HEADER",
        )

    # Fan the per-service scans out over a bounded thread pool.
    with ThreadPoolExecutor(PARALLEL_SERVICE_CALLS) as executor:
        per_service = executor.map(
            lambda service: self.analyze_service(
                service, boto_loader, allowed_actions
            ),
            service_names,
        )
        for result in per_service:
            if result is not None:
                collected.extend(result)
    return collected
def get_resources(self) -> List[Resource]:
    """Collect IAM roles, their tags, and assume-role relationships."""
    if self.options.verbose:
        message_handler("Collecting data from IAM Roles...", "HEADER")

    found = []
    for page in self.client.get_paginator("list_roles").paginate():
        for role in page["Roles"]:
            digest = ResourceDigest(id=role["RoleName"], type="aws_iam_role")
            tag_response = self.client.list_role_tags(RoleName=role["RoleName"])
            found.append(
                Resource(
                    digest=digest,
                    name=role["RoleName"],
                    details="",
                    group="",
                    tags=resource_tags(tag_response),
                )
            )
            # Trust-policy statements yield extra synthetic resources.
            if (
                "AssumeRolePolicyDocument" in role
                and "Statement" in role["AssumeRolePolicyDocument"]
            ):
                for statement in role["AssumeRolePolicyDocument"]["Statement"]:
                    found.extend(
                        self.analyze_assume_statement(digest, statement)
                    )
    self.resources_found = found
    return found
def get_resources(self) -> List[Resource]:
    """Collect the security groups belonging to the VPC."""
    ec2_client = self.vpc_options.client("ec2")
    vpc_filter = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    response = ec2_client.describe_security_groups(Filters=vpc_filter)

    if self.vpc_options.verbose:
        message_handler("Collecting data from Security Groups...", "HEADER")

    found = []
    for group in response["SecurityGroups"]:
        digest = ResourceDigest(id=group["GroupId"], type="aws_security_group")
        found.append(
            Resource(
                digest=digest,
                name=group["GroupName"],
                details="",
                group="network",
                tags=resource_tags(group),
            )
        )
        # Every security group relates directly to its VPC.
        self.relations_found.append(
            ResourceEdge(
                from_node=digest, to_node=self.vpc_options.vpc_digest()
            )
        )
    return found
def get_resources(self) -> List[Resource]:
    """Collect SQS queues whose policy references the VPC.

    Queue analysis fans out over a thread pool; analyze_queues returns a
    (matched, resource) pair and only matches are kept.

    Fix: the original enumerated the queue URLs only to immediately
    discard the index (`data[1]`); map directly over the URLs instead.
    """
    client = self.vpc_options.client("sqs")
    resources_found = []
    response = client.list_queues()

    if self.vpc_options.verbose:
        message_handler("Collecting data from SQS Queue Policy...", "HEADER")

    # list_queues omits the key entirely when there are no queues.
    if "QueueUrls" in response:
        with ThreadPoolExecutor(15) as executor:
            results = executor.map(
                lambda queue_url: self.analyze_queues(client, queue_url),
                response["QueueUrls"],
            )
            for result in results:
                if result[0] is True:
                    resources_found.append(result[1])
    return resources_found
def generate_diagram(
    self,
    resources: List[Resource],
    initial_resource_relations: List[ResourceEdge],
    title: str,
    filename: str,
):
    """Render the resource graph into a PNG diagram on disk."""
    grouped = self.group_by_group(resources, initial_resource_relations)
    edges = self.process_relationships(grouped, initial_resource_relations)

    output_filename = PATH_DIAGRAM_OUTPUT + filename
    with Diagram(
        name=title,
        filename=output_filename,
        direction="TB",
        show=False,
        graph_attr={"nodesep": "2.0", "ranksep": "1.0", "splines": "curved"},
    ) as diagram:
        # The layout engine is only reachable through the dot object.
        diagram.dot.engine = self.engine
        self.draw_diagram(ordered_resources=grouped, relations=edges)

    message_handler("\n\nPNG diagram generated", "HEADER")
    message_handler("Check your diagram: " + output_filename + ".png", "OKBLUE")
def get_resources(self) -> List[Resource]:
    """Collect the subnets of the VPC and link each one to it."""
    ec2_client = self.vpc_options.client("ec2")
    vpc_filter = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    response = ec2_client.describe_subnets(Filters=vpc_filter)

    if self.vpc_options.verbose:
        message_handler("Collecting data from Subnets...", "HEADER")

    found = []
    for subnet in response["Subnets"]:
        tag_name = get_name_tag(subnet)
        subnet_name = subnet["SubnetId"] if tag_name is None else tag_name
        digest = ResourceDigest(id=subnet["SubnetId"], type="aws_subnet")
        found.append(
            Resource(
                digest=digest,
                name=subnet_name,
                details="Subnet using CidrBlock {} and AZ {}".format(
                    subnet["CidrBlock"], subnet["AvailabilityZone"]
                ),
                group="network",
                tags=resource_tags(subnet),
            )
        )
        self.relations_found.append(
            ResourceEdge(
                from_node=digest, to_node=self.vpc_options.vpc_digest()
            )
        )
    return found
def get_resources(self) -> List[Resource]:
    """Collect Elasticsearch domains attached to (or whose access policy
    references) this VPC.

    Fix: describe_elasticsearch_domain omits "VPCOptions" entirely for
    non-VPC domains, so the original direct indexing raised KeyError;
    the config is now read with .get and safe defaults.
    """
    client = self.vpc_options.client("es")
    resources_found = []
    response = client.list_domain_names()

    if self.vpc_options.verbose:
        message_handler("Collecting data from Elasticsearch Domains...", "HEADER")

    for data in response["DomainNames"]:
        elasticsearch_domain = client.describe_elasticsearch_domain(
            DomainName=data["DomainName"]
        )
        domain_status = elasticsearch_domain["DomainStatus"]
        # Non-VPC domains carry no VPCOptions key at all.
        vpc_config = domain_status.get("VPCOptions", {})

        documentpolicy = domain_status["AccessPolicies"]
        document = json.dumps(documentpolicy, default=datetime_to_string)

        # check either vpc_id or potencial subnet ip are found
        ipvpc_found = check_ipvpc_inpolicy(
            document=document, vpc_options=self.vpc_options
        )

        # elasticsearch uses accesspolicies too, so check both situation
        if (
            vpc_config.get("VPCId") == self.vpc_options.vpc_id
            or ipvpc_found is True
        ):
            list_tags_response = client.list_tags(ARN=domain_status["ARN"])
            digest = ResourceDigest(
                id=domain_status["DomainId"],
                type="aws_elasticsearch_domain",
            )
            resources_found.append(
                Resource(
                    digest=digest,
                    name=domain_status["DomainName"],
                    details="",
                    group="analytics",
                    tags=resource_tags(list_tags_response),
                )
            )
            for subnet_id in vpc_config.get("SubnetIds", []):
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(id=subnet_id, type="aws_subnet"),
                    )
                )
    return resources_found
def html_report(
    self,
    resources: List[Resource],
    resource_relations: List[ResourceEdge],
    title: str,
    filename: str,
):
    """Write an HTML report, embedding the PNG diagram and drawio link when present.

    Fix: the original left ``html_output`` unbound (NameError at write time)
    when the resource list was empty; the standard template is now rendered
    for that case as well.
    """
    dir_template = Environment(
        loader=FileSystemLoader(
            os.path.dirname(os.path.abspath(__file__)) + "/../templates/"
        ),
        trim_blocks=True,
    )

    # Generate base64 image to embed in the report.
    diagram_image = None
    if filename is not None:
        image_name = PATH_DIAGRAM_OUTPUT + filename + ".png"
        if os.path.exists(image_name):
            with open(image_name, "rb") as image_file:
                diagram_image = base64.b64encode(image_file.read()).decode("utf-8")

    # Generate diagrams.net link.
    diagramsnet_image = None
    if filename is not None:
        image_name = PATH_DIAGRAM_OUTPUT + filename + ".drawio"
        if os.path.exists(image_name):
            diagramsnet_image = f"..{os.path.sep}..{os.path.sep}" + image_name

    group_title = "Group"
    if resources and resources[0].limits:
        html_output = dir_template.get_template("report_limits.html").render(
            default_name=title, resources_found=resources
        )
    else:
        if resources and resources[0].attributes:
            group_title = "Service"
        html_output = dir_template.get_template("report_html.html").render(
            default_name=title,
            resources_found=resources,
            resources_relations=resource_relations,
            diagram_image=diagram_image,
            diagramsnet_image=diagramsnet_image,
            group_title=group_title,
        )

    self.make_directories()
    name_output = PATH_REPORT_HTML_OUTPUT + filename + ".html"
    with open(name_output, "w") as file_output:
        file_output.write(html_output)

    message_handler("\n\nHTML report generated", "HEADER")
    message_handler("Check your HTML report: " + name_output, "OKBLUE")
def get_resources(self) -> List[Resource]:
    """Collect Auto Scaling groups that have at least one subnet in the VPC.

    Fix: the original appended the same ASG Resource once per matching
    subnet, producing duplicates; the resource is now added exactly once
    while an edge is still created for every matching subnet.
    """
    client = self.vpc_options.client("autoscaling")
    resources_found = []
    response = client.describe_auto_scaling_groups()

    if self.vpc_options.verbose:
        message_handler("Collecting data from Autoscaling Groups...", "HEADER")

    for data in response["AutoScalingGroups"]:
        asg_subnets = data["VPCZoneIdentifier"].split(",")

        # Using subnet to check VPC
        subnets = describe_subnet(
            vpc_options=self.vpc_options, subnet_ids=asg_subnets
        )
        if subnets is None:
            continue

        asg_name = data["AutoScalingGroupName"]
        digest = ResourceDigest(id=asg_name, type="aws_autoscaling_group")
        resource_added = False

        # Iterate subnets to relate the ASG to each one inside the VPC.
        for data_subnet in subnets["Subnets"]:
            if data_subnet["VpcId"] != self.vpc_options.vpc_id:
                continue
            if not resource_added:
                if "LaunchConfigurationName" in data:
                    details = "Using LaunchConfigurationName {0}".format(
                        data["LaunchConfigurationName"]
                    )
                else:
                    details = "Using Launch Template"
                resources_found.append(
                    Resource(
                        digest=digest,
                        name=asg_name,
                        details=details,
                        group="compute",
                        tags=resource_tags(data),
                    )
                )
                resource_added = True
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=data_subnet["SubnetId"], type="aws_subnet"
                    ),
                )
            )
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect EC2 instances living in the VPC, with subnet and ASG edges."""
    ec2_client = self.vpc_options.client("ec2")
    response = ec2_client.describe_instances()

    if self.vpc_options.verbose:
        message_handler("Collecting data from EC2 Instances...", "HEADER")

    found = []
    for reservation in response["Reservations"]:
        for instance in reservation["Instances"]:
            # Skip instances not attached to this VPC (e.g. EC2-Classic).
            if "VpcId" not in instance:
                continue
            if instance["VpcId"] != self.vpc_options.vpc_id:
                continue

            tag_name = get_name_tag(instance)
            asg_name = get_tag(instance, "aws:autoscaling:groupName")
            display_name = (
                instance["InstanceId"] if tag_name is None else tag_name
            )

            digest = ResourceDigest(
                id=instance["InstanceId"], type="aws_instance"
            )
            found.append(
                Resource(
                    digest=digest,
                    name=display_name,
                    details="",
                    group="compute",
                    tags=resource_tags(instance),
                )
            )
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=instance["SubnetId"], type="aws_subnet"
                    ),
                )
            )
            # Instances launched by an Auto Scaling group carry this tag.
            if asg_name is not None:
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=asg_name, type="aws_autoscaling_group"
                        ),
                    )
                )
    return found
def init_globalaws_limits_cache(self):
    """Build the 15-day cache of global AWS service limits.

    AWS limits rarely change and differ per region, so quota values are
    fetched once per service and cached for 15 days (1296000 seconds).

    Fix: the service-quotas client was recreated for every quota code
    although the region choice is invariant per service; it is now
    built once per service.

    Returns:
        True when the scan completes (already-cached services are skipped).
    """
    for service_code in self.services:
        if service_code not in ALLOWED_SERVICES_CODES:
            continue
        cache_key = "aws_limits_" + service_code + "_" + self.region
        if self.cache.get_key(cache_key) is not None:
            continue

        if self.options.verbose:
            message_handler(
                "Fetching aws global limit to service {} in region {} to cache...".format(
                    service_code, self.region
                ),
                "HEADER",
            )

        # Global services such as route53 MUST use the us-east-1 endpoint.
        if ALLOWED_SERVICES_CODES[service_code]["global"]:
            service_quota = self.session.client(
                "service-quotas", region_name="us-east-1"
            )
        else:
            service_quota = self.session.client(
                "service-quotas", region_name=self.region
            )

        cache_codes = dict()
        for quota_code in ALLOWED_SERVICES_CODES[service_code]:
            # "global" is a marker entry, not a quota code.
            if quota_code == "global":
                continue
            item_to_add = self.get_quota(quota_code, service_code, service_quota)
            if item_to_add is None:
                continue
            if service_code in cache_codes:
                cache_codes[service_code].append(item_to_add)
            else:
                cache_codes[service_code] = [item_to_add]

        self.cache.set_key(key=cache_key, value=cache_codes, expire=1296000)
    return True
def analyze_service(self, aws_service, boto_loader, allowed_actions):
    """Enumerate a service's List/Get/Describe operations and collect resources.

    Fix: the availability/skip check was joined with ``and
    self.options.verbose``, so an unavailable or skipped service was only
    skipped in verbose mode and scanned in quiet mode. The skip is now
    unconditional; only the warning message is verbose-gated.
    """
    resources = []
    client = self.options.client(aws_service)
    service_model = boto_loader.load_service_model(aws_service, "service-2")
    try:
        paginators_model = boto_loader.load_service_model(
            aws_service, "paginators-1"
        )
    except UnknownServiceError:
        # Some services ship no paginator definitions at all.
        paginators_model = {"pagination": {}}
    service_full_name = service_model["metadata"]["serviceFullName"]

    if self.options.verbose:
        message_handler(
            "Collecting data from {}...".format(service_full_name), "HEADER"
        )

    if (
        not self.availabilityCheck.is_service_available(
            self.options.region_name, aws_service
        )
        or aws_service in SKIPPED_SERVICES
    ):
        if self.options.verbose:
            message_handler(
                "Service {} not available in this region... Skipping".format(
                    service_full_name
                ),
                "WARNING",
            )
        return None

    for name, operation in service_model["operations"].items():
        # Only read-style operations are safe to call blindly.
        if not (
            name.startswith("List")
            or name.startswith("Get")
            or name.startswith("Describe")
        ):
            continue
        has_paginator = name in paginators_model["pagination"]
        if "input" in operation:
            input_model = service_model["shapes"][operation["input"]["shape"]]
            # Operations with required parameters cannot be invoked generically.
            if "required" in input_model and input_model["required"]:
                continue
        if (
            aws_service in REQUIRED_PARAMS_OVERRIDE
            and operation["name"] in REQUIRED_PARAMS_OVERRIDE[aws_service]
        ):
            continue
        resource_type = build_resource_type(aws_service, name)
        if resource_type in OMITTED_RESOURCES:
            continue
        if not operation_allowed(allowed_actions, aws_service, name):
            continue
        analyze_operation = self.analyze_operation(
            resource_type,
            name,
            has_paginator,
            client,
            service_full_name,
            aws_service,
        )
        if analyze_operation is not None:
            resources.extend(analyze_operation)
    return resources
def get_resources(self) -> List[Resource]:
    """Collect MediaConnect flows that use a VPC interface inside this VPC."""
    client = self.vpc_options.client("mediaconnect")
    resources_found = []
    response = client.list_flows()
    if self.vpc_options.verbose:
        message_handler("Collecting data from Media Connect...", "HEADER")
    for data in response["Flows"]:
        tags_response = client.list_tags_for_resource(
            ResourceArn=data["FlowArn"])
        data_flow = client.describe_flow(FlowArn=data["FlowArn"])
        # Flows without VPC interfaces cannot be related to any VPC.
        if "VpcInterfaces" in data_flow["Flow"]:
            for data_interfaces in data_flow["Flow"]["VpcInterfaces"]:
                # Using subnet to check VPC
                subnets = describe_subnet(
                    vpc_options=self.vpc_options,
                    subnet_ids=data_interfaces["SubnetId"],
                )
                if subnets is not None:
                    # Only record the flow when the interface subnet
                    # belongs to the VPC under analysis.
                    if subnets["Subnets"][0][
                            "VpcId"] == self.vpc_options.vpc_id:
                        digest = ResourceDigest(id=data["FlowArn"],
                                                type="aws_media_connect")
                        resources_found.append(
                            Resource(
                                digest=digest,
                                name=data["Name"],
                                details=
                                "Flow using VPC {} in VPC Interface {}".
                                format(self.vpc_options.vpc_id,
                                       data_interfaces["Name"]),
                                group="mediaservices",
                                tags=resource_tags(tags_response),
                            ))
                        self.relations_found.append(
                            ResourceEdge(
                                from_node=digest,
                                to_node=ResourceDigest(
                                    id=data_interfaces["SubnetId"],
                                    type="aws_subnet",
                                ),
                            ))
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect SageMaker training jobs whose VPC config targets this VPC."""
    client = self.vpc_options.client("sagemaker")
    found = []
    response = client.list_training_jobs()

    if self.vpc_options.verbose:
        message_handler("Collecting data from Sagemaker Training Job...",
                        "HEADER")

    for summary in response["TrainingJobSummaries"]:
        tags_response = client.list_tags(ResourceArn=summary["TrainingJobArn"])
        training_job = client.describe_training_job(
            TrainingJobName=summary["TrainingJobName"]
        )
        # Jobs without a VpcConfig never touch a VPC.
        if "VpcConfig" not in training_job:
            continue
        for subnet_id in training_job["VpcConfig"]["Subnets"]:
            # Using subnet to check VPC
            subnet = describe_subnet(
                vpc_options=self.vpc_options, subnet_ids=subnet_id
            )
            if subnet is None:
                continue
            if subnet["Subnets"][0]["VpcId"] == self.vpc_options.vpc_id:
                digest = ResourceDigest(
                    id=summary["TrainingJobArn"],
                    type="aws_sagemaker_training_job",
                )
                found.append(
                    Resource(
                        digest=digest,
                        name=summary["TrainingJobName"],
                        details="",
                        group="ml",
                        tags=resource_tags(tags_response),
                    )
                )
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=subnet_id, type="aws_subnet"
                        ),
                    )
                )
    return found
def get_resources(self) -> List[Resource]:
    """Collect VPC endpoints (gateway and interface types) of this VPC."""
    ec2_client = self.vpc_options.client("ec2")
    vpc_filter = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    response = ec2_client.describe_vpc_endpoints(Filters=vpc_filter)

    if self.vpc_options.verbose:
        message_handler("Collecting data from VPC Endpoints...", "HEADER")

    found = []
    for endpoint in response["VpcEndpoints"]:
        if endpoint["VpcId"] != self.vpc_options.vpc_id:
            continue
        digest = ResourceDigest(
            id=endpoint["VpcEndpointId"], type="aws_vpc_endpoint_gateway"
        )
        if endpoint["VpcEndpointType"] == "Gateway":
            # Gateway endpoints attach through route tables -> relate to VPC.
            found.append(
                Resource(
                    digest=digest,
                    name=endpoint["VpcEndpointId"],
                    details="Vpc Endpoint Gateway RouteTable {}".format(
                        ", ".join(endpoint["RouteTableIds"])
                    ),
                    group="network",
                    tags=resource_tags(endpoint),
                )
            )
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=self.vpc_options.vpc_digest(),
                )
            )
        else:
            # Interface endpoints live in subnets -> relate to each subnet.
            found.append(
                Resource(
                    digest=digest,
                    name=endpoint["VpcEndpointId"],
                    details="Vpc Endpoint Service Subnet {}".format(
                        ", ".join(endpoint["SubnetIds"])
                    ),
                    group="network",
                    tags=resource_tags(endpoint),
                )
            )
            for subnet_id in endpoint["SubnetIds"]:
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=subnet_id, type="aws_subnet"
                        ),
                    )
                )
    return found
def get_resources(self) -> List[Resource]:
    """Collect EFS file systems that have mount targets in this VPC."""
    client = self.vpc_options.client("efs")
    found = []

    # get filesystems available
    response = client.describe_file_systems()

    if self.vpc_options.verbose:
        message_handler("Collecting data from EFS Mount Targets...", "HEADER")

    for filesystem in response["FileSystems"]:
        mount_targets = client.describe_mount_targets(
            FileSystemId=filesystem["FileSystemId"]
        )
        tag_name = get_name_tag(filesystem)
        display_name = (
            filesystem["FileSystemId"] if tag_name is None else tag_name
        )

        # iterate filesystems to get mount targets
        for target in mount_targets["MountTargets"]:
            # Using subnet to check VPC
            subnets = describe_subnet(
                vpc_options=self.vpc_options, subnet_ids=target["SubnetId"]
            )
            if subnets is None:
                continue
            if subnets["Subnets"][0]["VpcId"] == self.vpc_options.vpc_id:
                digest = ResourceDigest(
                    id=filesystem["FileSystemId"], type="aws_efs_file_system"
                )
                found.append(
                    Resource(
                        digest=digest,
                        name=display_name,
                        details="",
                        group="storage",
                        tags=resource_tags(filesystem),
                    )
                )
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=target["SubnetId"], type="aws_subnet"
                        ),
                    )
                )
    return found
def get_resources(self) -> List[Resource]:
    """Collect VPC peering connections where this VPC is accepter or requester."""
    client = self.vpc_options.client("ec2")
    resources_found = []
    response = client.describe_vpc_peering_connections()
    if self.vpc_options.verbose:
        message_handler("Collecting data from VPC Peering...", "HEADER")
    for data in response["VpcPeeringConnections"]:
        # The target VPC may sit on either side of the peering connection.
        if (data["AccepterVpcInfo"]["VpcId"] == self.vpc_options.vpc_id
                or data["RequesterVpcInfo"]["VpcId"] == self.vpc_options.vpc_id):
            nametag = get_name_tag(data)
            # Fall back to the connection id when no Name tag exists.
            name = data[
                "VpcPeeringConnectionId"] if nametag is None else nametag
            peering_digest = ResourceDigest(
                id=data["VpcPeeringConnectionId"],
                type="aws_vpc_peering_connection",
            )
            resources_found.append(
                Resource(
                    digest=peering_digest,
                    name=name,
                    details=
                    "Vpc Peering Accepter OwnerId {}, Accepter Region {}, Accepter VpcId {} \
Requester OwnerId {}, Requester Region {}, \
Requester VpcId {}".
                    format(
                        data["AccepterVpcInfo"]["OwnerId"],
                        data["AccepterVpcInfo"]["Region"],
                        data["AccepterVpcInfo"]["VpcId"],
                        data["RequesterVpcInfo"]["OwnerId"],
                        data["RequesterVpcInfo"]["Region"],
                        data["RequesterVpcInfo"]["VpcId"],
                    ),
                    group="network",
                    tags=resource_tags(data),
                ))
            self.relations_found.append(
                ResourceEdge(
                    from_node=peering_digest,
                    to_node=self.vpc_options.vpc_digest(),
                ))
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect Workspaces whose Directory Service directory sits in this VPC.

    Fix: the Directory Service client was recreated inside the workspace
    loop on every iteration although it is loop-invariant; it is now
    built once before the loop.
    """
    client = self.vpc_options.client("workspaces")
    # Loop-invariant client: build once, not once per workspace.
    directory_service = self.vpc_options.client("ds")
    resources_found = []
    response = client.describe_workspaces()

    if self.vpc_options.verbose:
        message_handler("Collecting data from Workspaces...", "HEADER")

    for data in response["Workspaces"]:
        # Get tag name
        tags = client.describe_tags(ResourceId=data["WorkspaceId"])
        nametag = get_name_tag(tags)
        workspace_name = data["WorkspaceId"] if nametag is None else nametag

        directories = directory_service.describe_directories(
            DirectoryIds=[data["DirectoryId"]]
        )
        for directory in directories["DirectoryDescriptions"]:
            if "VpcSettings" not in directory:
                continue
            if directory["VpcSettings"]["VpcId"] == self.vpc_options.vpc_id:
                workspace_digest = ResourceDigest(
                    id=data["WorkspaceId"], type="aws_workspaces"
                )
                resources_found.append(
                    Resource(
                        digest=workspace_digest,
                        name=workspace_name,
                        details="",
                        group="enduser",
                        tags=resource_tags(tags),
                    )
                )
                self.relations_found.append(
                    ResourceEdge(
                        from_node=workspace_digest,
                        to_node=ResourceDigest(
                            id=directory["DirectoryId"], type="aws_ds"
                        ),
                    )
                )
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect MSK clusters that have a broker subnet inside this VPC.

    Fix: the ec2 resource object and the VPC subnet listing were rebuilt
    for every cluster although they do not depend on the cluster; they
    are now fetched once before the loop.
    """
    client = self.vpc_options.client("kafka")
    resources_found = []

    # get all cache clusters
    response = client.list_clusters()

    if self.vpc_options.verbose:
        message_handler("Collecting data from MSK Clusters...", "HEADER")

    # Loop-invariant: fetch the VPC subnets once.
    ec2 = self.vpc_options.session.resource(
        "ec2", region_name=self.vpc_options.region_name
    )
    filters = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    vpc_subnets = list(ec2.subnets.filter(Filters=filters))

    # iterate cache clusters to get subnet groups
    for data in response["ClusterInfoList"]:
        msk_subnets = ", ".join(data["BrokerNodeGroupInfo"]["ClientSubnets"])

        for subnet in vpc_subnets:
            if subnet.id in msk_subnets:
                digest = ResourceDigest(
                    id=data["ClusterArn"], type="aws_msk_cluster"
                )
                resources_found.append(
                    Resource(
                        digest=digest,
                        name=data["ClusterName"],
                        details="",
                        group="analytics",
                        tags=resource_tags(data),
                    )
                )
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=subnet.id, type="aws_subnet"
                        ),
                    )
                )
                # One edge per cluster is enough; stop at first match.
                break
    return resources_found
def get_resources(self) -> List[Resource]:
    """Collect SageMaker notebook instances whose subnet is in this VPC."""
    client = self.vpc_options.client("sagemaker")
    found = []
    response = client.list_notebook_instances()

    if self.vpc_options.verbose:
        message_handler(
            "Collecting data from Sagemaker Notebook instances...", "HEADER")

    for data in response["NotebookInstances"]:
        notebook_instance = client.describe_notebook_instance(
            NotebookInstanceName=data["NotebookInstanceName"]
        )
        tags_response = client.list_tags(
            ResourceArn=data["NotebookInstanceArn"]
        )

        # Using subnet to check VPC
        subnets = describe_subnet(
            vpc_options=self.vpc_options,
            subnet_ids=notebook_instance["SubnetId"],
        )
        if subnets is None:
            continue
        if subnets["Subnets"][0]["VpcId"] == self.vpc_options.vpc_id:
            digest = ResourceDigest(
                id=data["NotebookInstanceArn"],
                type="aws_sagemaker_notebook_instance",
            )
            found.append(
                Resource(
                    digest=digest,
                    name=data["NotebookInstanceName"],
                    details="",
                    group="ml",
                    tags=resource_tags(tags_response),
                )
            )
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=notebook_instance["SubnetId"], type="aws_subnet"
                    ),
                )
            )
    return found
def get_resources(self) -> List[Resource]:
    """Collect IoT certificates attached as principals to known things."""
    client = self.iot_options.client("iot")
    found = []

    if self.iot_options.verbose:
        message_handler("Collecting data from IoT Certificates...", "HEADER")

    for thing in self.iot_options.thing_name["things"]:
        principals = client.list_thing_principals(
            thingName=thing["thingName"]
        )
        for principal in principals["principals"]:
            # Only certificate principals carry "cert/" in their ARN.
            if "cert/" not in principal:
                continue
            certificate_id = principal.split("/")[1]
            data_cert = client.describe_certificate(
                certificateId=certificate_id
            )
            description = data_cert["certificateDescription"]
            tag_response = client.list_tags_for_resource(
                resourceArn=description["certificateArn"]
            )
            digest = ResourceDigest(
                id=description["certificateId"],
                type="aws_iot_certificate",
            )
            found.append(
                Resource(
                    digest=digest,
                    name=description["certificateId"],
                    details="",
                    group="iot",
                    tags=resource_tags(tag_response),
                )
            )
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=thing["thingName"], type="aws_iot_thing"
                    ),
                )
            )
    return found
def get_resources(self) -> List[Resource]:
    """Collect EMR clusters whose EC2 subnet belongs to this VPC."""
    client = self.vpc_options.client("emr")
    found = []
    response = client.list_clusters()

    if self.vpc_options.verbose:
        message_handler("Collecting data from EMR Clusters...", "HEADER")

    for summary in response["Clusters"]:
        cluster = client.describe_cluster(ClusterId=summary["Id"])
        subnet_id = cluster["Cluster"]["Ec2InstanceAttributes"]["Ec2SubnetId"]

        # Using subnet to check VPC
        subnets = describe_subnet(
            vpc_options=self.vpc_options, subnet_ids=subnet_id
        )
        if subnets is None:
            continue
        if subnets["Subnets"][0]["VpcId"] == self.vpc_options.vpc_id:
            digest = ResourceDigest(id=summary["Id"], type="aws_emr_cluster")
            found.append(
                Resource(
                    digest=digest,
                    name=summary["Name"],
                    details="",
                    group="compute",
                    tags=resource_tags(cluster["Cluster"]),
                )
            )
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(id=subnet_id, type="aws_subnet"),
                )
            )
    return found
def get_resources(self) -> List[Resource]:
    """Collect Neptune instances in this VPC, linked to their subnets."""
    client = self.vpc_options.client("neptune")
    found = []
    response = client.describe_db_instances(
        Filters=[{"Name": "engine", "Values": ["neptune"]}]
    )

    if self.vpc_options.verbose:
        message_handler("Collecting data from Neptune Instances...", "HEADER")

    # iterate instances and relate each one to its subnet-group subnets
    for instance in response["DBInstances"]:
        if instance["DBSubnetGroup"]["VpcId"] != self.vpc_options.vpc_id:
            continue

        tags_response = client.list_tags_for_resource(
            ResourceName=instance["DBInstanceArn"]
        )
        digest = ResourceDigest(
            id=instance["DBInstanceArn"], type="aws_neptune_cluster"
        )

        subnet_ids = []
        for subnet in instance["DBSubnetGroup"]["Subnets"]:
            subnet_ids.append(subnet["SubnetIdentifier"])
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=subnet["SubnetIdentifier"], type="aws_subnet"
                    ),
                )
            )

        found.append(
            Resource(
                digest=digest,
                name=instance["DBInstanceIdentifier"],
                details="Neptune using subnets {} and engine {}".format(
                    ", ".join(subnet_ids), instance["Engine"]
                ),
                group="database",
                tags=resource_tags(tags_response),
            )
        )
    return found
def get_resources(self) -> List[Resource]:
    """Collect Elasticache clusters whose subnet group is in this VPC."""
    client = self.vpc_options.client("elasticache")
    found = []

    # get all cache clusters
    response = client.describe_cache_clusters()

    if self.vpc_options.verbose:
        message_handler("Collecting data from Elasticache Clusters...",
                        "HEADER")

    # iterate cache clusters to get subnet groups
    for cluster in response["CacheClusters"]:
        subnet_group = client.describe_cache_subnet_groups(
            CacheSubnetGroupName=cluster["CacheSubnetGroupName"]
        )["CacheSubnetGroups"][0]

        if subnet_group["VpcId"] != self.vpc_options.vpc_id:
            continue

        digest = ResourceDigest(
            id=cluster["CacheClusterId"], type="aws_elasticache_cluster"
        )
        subnet_ids = []
        for subnet in subnet_group["Subnets"]:
            subnet_ids.append(subnet["SubnetIdentifier"])
            self.relations_found.append(
                ResourceEdge(
                    from_node=digest,
                    to_node=ResourceDigest(
                        id=subnet["SubnetIdentifier"], type="aws_subnet"
                    ),
                )
            )

        found.append(
            Resource(
                digest=digest,
                name=cluster["CacheSubnetGroupName"],
                details="Elasticache Cluster using subnets {} and engine {}".format(
                    ", ".join(subnet_ids), cluster["Engine"]
                ),
                group="database",
            )
        )
    return found
def get_resources(self) -> List[Resource]:
    """Collect IoT policies attached to the principals of known things."""
    client = self.iot_options.client("iot")
    found = []

    if self.iot_options.verbose:
        message_handler("Collecting data from IoT Policies...", "HEADER")

    for thing in self.iot_options.thing_name["things"]:
        principals = client.list_thing_principals(
            thingName=thing["thingName"]
        )
        for principal in principals["principals"]:
            policies = client.list_principal_policies(principal=principal)
            for policy in policies["policies"]:
                data_policy = client.get_policy(
                    policyName=policy["policyName"]
                )
                tag_response = client.list_tags_for_resource(
                    resourceArn=data_policy["policyArn"]
                )
                digest = ResourceDigest(
                    id=data_policy["policyArn"], type="aws_iot_policy"
                )
                found.append(
                    Resource(
                        digest=digest,
                        name=data_policy["policyName"],
                        details="",
                        group="iot",
                        tags=resource_tags(tag_response),
                    )
                )
                self.relations_found.append(
                    ResourceEdge(
                        from_node=digest,
                        to_node=ResourceDigest(
                            id=thing["thingName"], type="aws_iot_thing"
                        ),
                    )
                )
    return found
def get_resources(self) -> List[Resource]:
    """Collect the non-deleted NAT gateways of this VPC."""
    ec2_client = self.vpc_options.client("ec2")
    vpc_filter = [{"Name": "vpc-id", "Values": [self.vpc_options.vpc_id]}]
    response = ec2_client.describe_nat_gateways(Filters=vpc_filter)

    if self.vpc_options.verbose:
        message_handler("Collecting data from NAT Gateways...", "HEADER")

    found = []
    for gateway in response["NatGateways"]:
        # Deleted gateways may still be reported for a while; skip them.
        if (
            gateway["VpcId"] != self.vpc_options.vpc_id
            or gateway["State"] == "deleted"
        ):
            continue

        tag_name = get_name_tag(gateway)
        gateway_name = (
            gateway["NatGatewayId"] if tag_name is None else tag_name
        )
        digest = ResourceDigest(
            id=gateway["NatGatewayId"], type="aws_nat_gateway"
        )
        found.append(
            Resource(
                digest=digest,
                name=gateway_name,
                details="NAT Gateway Private IP {}, Public IP {}, Subnet id {}"
                .format(
                    gateway["NatGatewayAddresses"][0]["PrivateIp"],
                    gateway["NatGatewayAddresses"][0]["PublicIp"],
                    gateway["SubnetId"],
                ),
                group="network",
                tags=resource_tags(gateway),
            )
        )
        self.relations_found.append(
            ResourceEdge(
                from_node=digest,
                to_node=ResourceDigest(
                    id=gateway["SubnetId"], type="aws_subnet"
                ),
            )
        )
    return found