def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        iam {
          userDetail {
            data {
              farosAccountId
              farosRegionId
              userId
              userName
              mfaDevices { data { serialNumber } }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    users = response["aws"]["iam"]["userDetail"]["data"]

    return [{
        "userName": u["userName"],
        "userId": u["userId"],
        "farosAccountId": u["farosAccountId"],
        "farosRegionId": u["farosRegionId"]
    } for u in users if not u["mfaDevices"]["data"]]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        s3 {
          bucket {
            data {
              farosAccountId
              farosRegionId
              name
              policyStatus { isPublic }
              publicAccessBlock {
                blockPublicAcls
                blockPublicPolicy
                ignorePublicAcls
                restrictPublicBuckets
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    buckets = response["aws"]["s3"]["bucket"]["data"]

    return [{
        "farosAccountId": b["farosAccountId"],
        "farosRegionId": b["farosRegionId"],
        "name": b["name"]
    } for b in buckets if is_bucket_public(b)]
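# is_bucket_public is not shown here; a minimal sketch of what it might look
# like, assuming the bucket dict carries the policyStatus and publicAccessBlock
# shapes selected in the query above (either may be null):
def is_bucket_public(bucket):
    """Hypothetical helper: public policy, or any public-access block
    setting disabled (or no block configured at all)."""
    policy_status = bucket.get("policyStatus") or {}
    if policy_status.get("isPublic"):
        return True
    block = bucket.get("publicAccessBlock")
    if not block:
        return True
    return not all([
        block.get("blockPublicAcls"),
        block.get("blockPublicPolicy"),
        block.get("ignorePublicAcls"),
        block.get("restrictPublicBuckets"),
    ])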
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    params = event['params']
    report_name = params['report_name']
    recipient = params['recipient']
    subject = params.get('subject', 'Faros AI notification')

    records = event.get('data')
    if records:
        header = list(records[0].keys())
        data = [[record[k] for k in header] for record in records]
    else:
        header = None
        data = []

    file_loader = FileSystemLoader(os.path.dirname(__file__))
    env = Environment(loader=file_loader)
    template = env.get_template('email.html')
    html = template.render(report_name=report_name, header=header, data=data)

    sender = '*****@*****.**'

    query = '''mutation($to: [String!]!, $subject: String!, $htmlBody: String!) {
      faros_send_email(
        to: $to
        subject: $subject
        textBody: "This report can only be seen in HTML enabled email clients"
        htmlBody: $htmlBody
      )
    }'''

    variables = {"to": [recipient], "subject": subject, "htmlBody": html}
    response = client.graphql_execute(query, variables)
    return response
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    count = int(event["params"]["ip_count"])
    if count < 1:
        raise ValueError("IP count should be a positive integer")

    query = '''{
      aws {
        ec2 {
          subnet {
            data {
              farosAccountId
              farosRegionId
              subnetId
              availableIpAddressCount
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    subnets = response["aws"]["ec2"]["subnet"]["data"]

    return [
        subnet for subnet in subnets
        if subnet["availableIpAddressCount"] < count
    ]
def lambda_handler(event, context): client = FarosClient.from_event(event) query = """{ aws { iam { groupDetail { data { farosAccountId farosRegionId groupName groupId groupPolicyList { policyDocument policyName } } } } } }""" response = client.graphql_execute(query) groups = response["aws"]["iam"]["groupDetail"]["data"] return [g for g in groups if full_star_policy(g["groupPolicyList"])]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    reference_time = int(event["params"].get("reference_time_secs",
                                             datetime.now().timestamp()))
    window_size = int(event["params"].get("window_days", 30))
    compare_to = int(event["params"].get("compare_to_days", 7))
    scope_employee_uid = str(event["params"].get("employee", ""))

    previous_window, current_window = get_comparison_windows(
        datetime.fromtimestamp(reference_time), window_size, compare_to)

    with open(os.path.join(APP_DIR, "query.gql"), "r") as file:
        query = file.read()

    variables = {
        "scoped": bool(scope_employee_uid),
        "scopeEmployeeUid": scope_employee_uid,
        "cutoff": int(previous_window.since.timestamp() * 1000)
    }
    response = client.graphql_execute(query, variables)

    jpath = SCOPED_JPATH if scope_employee_uid else UNSCOPED_JPATH
    deploy_nodes = [m.value for m in jpath.find(response)]
    deploys = [d for d in deploy_nodes if d.get("endedAt") is not None]
    deploys.sort(key=lambda d: (d["application"]["name"], d["endedAt"]))

    deploys_previous = filter_deploys(deploys, previous_window)
    deploys_current = filter_deploys(deploys, current_window)

    mrt_previous = mean_recovery_time(deploys_previous)
    mrt_current = mean_recovery_time(deploys_current)

    return {
        "Mean Time to Recovery": mrt_current,
        "Mean Recovery Time Change": relative_change(mrt_current, mrt_previous),
    }
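# relative_change is referenced here and in the change-failure-rate handler but
# not shown; a minimal sketch, under the assumption that it reports the
# fractional change of the current value against the previous one and guards
# against a zero or missing baseline:
def relative_change(current, previous):
    """Hypothetical helper: (current - previous) / previous, or None when
    there is no previous value to compare against."""
    if not previous:
        return None
    return (current - previous) / previous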
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          securityGroup {
            data {
              farosAccountId
              farosRegionId
              groupId
              instances { data { instanceId } }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    groups = response["aws"]["ec2"]["securityGroup"]["data"]

    return [g for g in groups if not g["instances"]["data"]]
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["max_days"]) query = '''{ github { pullRequest { data { number title state repo_name locked created_at updated_at user_login assignee_login } } } }''' response = client.graphql_execute(query) pull_requests = response["github"]["pullRequest"]["data"] return [ p for p in pull_requests if time_diff(p["created_at"]).days > cutoff and p["state"] == "open" ]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          volume {
            data {
              farosAccountId
              farosRegionId
              volumeId
              state
              instance {
                instanceId
                state { name }
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    volumes = response["aws"]["ec2"]["volume"]["data"]

    volumes_with_stopped_instances = []
    for volume in volumes:
        if volume.get("instance"):
            if volume["instance"]["state"]["name"] == "stopped":
                volumes_with_stopped_instances.append(volume)
    return volumes_with_stopped_instances
def lambda_handler(event, context): client = FarosClient.from_event(event) query = """{ aws { iam { roleDetail { data { farosAccountId farosRegionId roleId roleName rolePolicyList { policyName policyDocument } } } } } }""" response = client.graphql_execute(query) roles = response["aws"]["iam"]["roleDetail"]["data"] return [r for r in roles if full_star_policy(r["rolePolicyList"])]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    start_time = int(
        event["params"].get(
            "start_time_secs",
            (datetime.now() - timedelta(days=30)).timestamp()
        )
    )
    end_time = int(event["params"].get(
        "end_time_secs", datetime.now().timestamp()))
    scope_employee_id = str(event["params"].get("employee", ""))

    with open(os.path.join(APP_DIR, "query.gql"), "r") as file:
        query = file.read()

    variables = {
        "scoped": bool(scope_employee_id),
        "scopeEmployeeUid": scope_employee_id,
        "startTime": start_time * 1000,
        "endTime": end_time * 1000
    }
    response = client.graphql_execute(query, variables)

    jpath = SCOPED_JPATH if scope_employee_id else UNSCOPED_JPATH
    deploy_nodes = [m.value for m in jpath.find(response)]
    deployments = [
        d for d in deploy_nodes
        if d.get("endedAt") is not None and d["status"] == "Success"
    ]
    deployments.sort(key=lambda d: d["endedAt"])

    breakdown = [DeployStages(d) for d in deployments]
    return [b.to_dict() for b in breakdown]
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["num_days"]) if cutoff < 1: raise ValueError("num days should be a positive integer") query = '''{ aws { ec2 { instance { data { farosAccountId farosRegionId instanceId launchTime state { name } } } } } }''' response = client.graphql_execute(query) instances = response["aws"]["ec2"]["instance"]["data"] return [ i for i in instances if i["state"]["name"] == "running" and time_diff(i["launchTime"]).days > cutoff ]
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["max_days"]) query = '''{ github { repository { data { branches { data { name repo_name commit_sha protected commit { sha date } } } } } } }''' response = client.graphql_execute(query) repos = response["github"]["repository"]["data"] return [ b for repo in repos for b in repo["branches"]["data"] if is_stale_branch(b, cutoff) ]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        lambda {
          functionConfiguration {
            data {
              farosAccountId
              farosRegionId
              functionName
              functionArn
              deadLetterConfig { targetArn }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    functions = response["aws"]["lambda"]["functionConfiguration"]["data"]

    return [f for f in functions if not f["deadLetterConfig"]["targetArn"]]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          vpc {
            data {
              farosAccountId
              farosRegionId
              vpcId
              vpcEndpoints {
                data {
                  state
                  serviceName
                  vpcEndpointId
                }
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    vpcs = response["aws"]["ec2"]["vpc"]["data"]

    non_compliant_vpcs = [{
        "farosAccountId": vpc["farosAccountId"],
        "farosRegionId": vpc["farosRegionId"],
        "vpcId": vpc["vpcId"]
    } for vpc in vpcs if not vpc_has_s3_enabled_endpoint(vpc)]

    return non_compliant_vpcs
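# vpc_has_s3_enabled_endpoint is defined elsewhere; a minimal sketch, assuming
# an endpoint counts when its serviceName is an S3 service (e.g.
# "com.amazonaws.<region>.s3") and it is in the "available" state:
def vpc_has_s3_enabled_endpoint(vpc):
    """Hypothetical helper: does the VPC have an available S3 endpoint?"""
    for endpoint in (vpc.get("vpcEndpoints") or {}).get("data", []):
        service = endpoint.get("serviceName", "")
        if service.endswith(".s3") and endpoint.get("state") == "available":
            return True
    return False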
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        s3 {
          bucket {
            data {
              farosAccountId
              farosRegionId
              name
              encryption {
                rules {
                  applyServerSideEncryptionByDefault {
                    kmsMasterKeyID
                    sseAlgorithm
                  }
                }
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    buckets = response["aws"]["s3"]["bucket"]["data"]

    return [{
        "farosAccountId": b["farosAccountId"],
        "farosRegionId": b["farosRegionId"],
        "name": b["name"]
    } for b in buckets if not b["encryption"]]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          volume {
            data {
              farosAccountId
              farosRegionId
              volumeId
              state
              instance { instanceId }
              attachments { instanceId }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    volumes = response["aws"]["ec2"]["volume"]["data"]

    return [v for v in volumes if not v["attachments"]]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    required_keys = frozenset(event["params"]["keys"].split(","))

    query = '''{
      aws {
        ec2 {
          instance {
            data {
              farosAccountId
              farosRegionId
              instanceId
              tags { key value }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    instances = response["aws"]["ec2"]["instance"]["data"]

    tagless_instances = [{
        "instance": i,
        "missingKeys": missing_tags(
            required_keys, frozenset([t["key"] for t in i["tags"]]))
    } for i in instances]

    tagless_instances = [i for i in tagless_instances if i["missingKeys"]]
    return tagless_instances
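# missing_tags is defined elsewhere; a minimal sketch, assuming it simply
# returns the required tag keys that are absent from the keys actually present
# on the resource (it is also used by the volume-tag handler below):
def missing_tags(required_keys, present_keys):
    """Hypothetical helper: required tag keys not present on the resource."""
    return sorted(required_keys - present_keys)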
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      github {
        issue {
          data {
            number
            title
            state
            repo_name
            created_at
            labels { name }
            comments
            assignee_login
            pull_request { url }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    issues = response["github"]["issue"]["data"]

    return [
        i for i in issues
        if i["state"] == "open"
        and not i.get("labels")
        and not i.get("assignee_login")
        and not i.get("pull_request")
    ]
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["max_days"]) if cutoff < 1: raise ValueError("max days should be a positive integer") query = '''{ aws { iam { user { data { farosAccountId farosRegionId userId userName passwordLastUsed } } } } }''' response = client.graphql_execute(query) users = response["aws"]["iam"]["user"]["data"] return [ u for u in users if u["passwordLastUsed"] is None or days_diff(u["passwordLastUsed"]) > cutoff ]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    required_policy_arns = event["params"]["required_policy_arns"].split(",")

    query = """{
      aws {
        iam {
          userDetail {
            data {
              farosAccountId
              farosRegionId
              userId
              userName
              attachedManagedPolicies {
                policyArn
                policyName
              }
              groups {
                data {
                  groupId
                  groupName
                  attachedManagedPolicies {
                    policyArn
                    policyName
                  }
                }
              }
            }
          }
        }
      }
    }"""

    response = client.graphql_execute(query)
    users = response["aws"]["iam"]["userDetail"]["data"]

    users_without_policies = []
    for user in users:
        policies = get_policy_arns(user)
        missing_policies = get_missing_policies(required_policy_arns, policies)
        if missing_policies:
            users_without_policies.append({
                "userId": user["userId"],
                "userName": user["userName"],
                "farosAccountId": user["farosAccountId"],
                "farosRegionId": user["farosRegionId"],
                "missingPolicies": missing_policies
            })
    return users_without_policies
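# get_policy_arns and get_missing_policies are defined elsewhere; minimal
# sketches, under the assumption that a user's effective policies are the ARNs
# attached directly plus those attached to any of the user's groups:
def get_policy_arns(user):
    """Hypothetical helper: ARNs of managed policies attached to the user,
    directly or via group membership."""
    arns = {p["policyArn"] for p in user.get("attachedManagedPolicies", [])}
    for group in (user.get("groups") or {}).get("data", []):
        arns.update(
            p["policyArn"] for p in group.get("attachedManagedPolicies", []))
    return arns


def get_missing_policies(required_arns, attached_arns):
    """Hypothetical helper: required ARNs that are not attached."""
    return [arn for arn in required_arns if arn not in attached_arns]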
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    tag_name = event["params"]["tag_name"]
    tag_value = event["params"]["tag_value"]

    query = '''{
      aws {
        ec2 {
          instance {
            data {
              farosAccountId
              farosRegionId
              instanceId
              instanceType
              state { name }
              tags { key value }
              volumes {
                data {
                  size
                  volumeType
                }
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    instances = response["aws"]["ec2"]["instance"]["data"]

    infra = []
    for i in instances:
        if i["state"]["name"] == "running":
            for t in i["tags"]:
                if t["key"] == tag_name and t["value"] == tag_value:
                    infra.append({
                        "account": i["farosAccountId"],
                        "region": i["farosRegionId"],
                        "instanceId": i["instanceId"],
                        "instanceType": i["instanceType"],
                        "volumes": i["volumes"]["data"]
                    })
    return infra
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["max_days"]) query = '''{ github { user { data { login name email membership commits { data { sha date } } issues { data { state updated_at } } pullRequests { data { state updated_at } } } } } }''' response = client.graphql_execute(query) users = response["github"]["user"]["data"] return [ { "login": u["login"], "name": u["name"], "email": u["email"], "membership": u["membership"] } for u in users if not has_recent_activity(u, cutoff) ]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    required_policy_param = event["params"].get("required_policy_statement")
    if not required_policy_param:
        raise KeyError("Required policy statements not supplied. You can "
                       "define them with -p "
                       "required_policy_statement=<comma-separated-policies>")
    required_policies = required_policy_param.split(",")

    query = '''{
      aws {
        s3 {
          bucket {
            data {
              farosAccountId
              farosRegionId
              name
              policy { policy }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    buckets = response["aws"]["s3"]["bucket"]["data"]

    buckets_missing_policy = []
    for bucket in buckets:
        missing = get_missing_policies(bucket["policy"], required_policies)
        if missing:
            buckets_missing_policy.append({
                "name": bucket["name"],
                "missingPolicies": missing,
                "farosAccountId": bucket["farosAccountId"],
                "farosRegionId": bucket["farosRegionId"],
            })
    return buckets_missing_policy
def lambda_handler(event, context): client = FarosClient.from_event(event) cutoff = int(event["params"]["max_days"]) if cutoff < 1: raise ValueError("max days should be a positive integer") query = '''{ aws { iam { userDetail { data { farosAccountId farosRegionId userId userName accessKeys { data { status createDate accessKeyId } } } } } } }''' response = client.graphql_execute(query) users = response["aws"]["iam"]["userDetail"]["data"] old_access_keys = [] for user in users: old_keys = [ key for key in user["accessKeys"]["data"] if key["status"] == "Active" and days_diff(key["createDate"]) > cutoff ] if old_keys: old_access_keys.append({ "userId": user["userId"], "userName": user["userName"], "accessKeys": old_keys, "farosAccountId": user["farosAccountId"], "farosRegionId": user["farosRegionId"] }) return old_access_keys
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          volume {
            data {
              farosAccountId
              farosRegionId
              volumeId
              tags { key value }
              state
              instance {
                tags { key value }
              }
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    volumes = response["aws"]["ec2"]["volume"]["data"]

    volumes_with_missing_tags = []
    for v in volumes:
        if v["state"] == "in-use":
            instance_tags = frozenset(
                [t["key"] for t in v["instance"]["tags"]])
            volume_tags = frozenset([t["key"] for t in v["tags"]])
            delta = missing_tags(instance_tags, volume_tags)
            if delta:
                volumes_with_missing_tags.append({
                    "volume": v,
                    "missingKeys": delta
                })
    return volumes_with_missing_tags
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    reference_time = int(event["params"].get(
        "reference_time_secs", datetime.now().timestamp()))
    window_size = int(event["params"].get("window_days", 30))
    compare_to = int(event["params"].get("compare_to_days", 7))
    scope_employee_uid = str(event["params"].get("employee", ""))

    previous_window, current_window = get_comparison_windows(
        datetime.fromtimestamp(reference_time), window_size, compare_to)

    with open(os.path.join(APP_DIR, "query.gql"), "r") as file:
        query = file.read()

    variables = {
        "scoped": bool(scope_employee_uid),
        "scopeEmployeeUid": scope_employee_uid,
        "cutoff": int(previous_window.since.timestamp() * 1000)
    }
    response = client.graphql_execute(query, variables)

    jpath = SCOPED_JPATH if scope_employee_uid else UNSCOPED_JPATH
    deploy_nodes = [m.value for m in jpath.find(response)]
    deploys = [d for d in deploy_nodes if d.get("endedAt") is not None]
    deploys.sort(key=lambda d: (d["application"]["name"], d["endedAt"]))

    failed_deploys_prev = filter_deploys(deploys, "Failed", previous_window)
    failed_deploys_curr = filter_deploys(deploys, "Failed", current_window)
    success_deploys_prev = filter_deploys(deploys, "Success", previous_window)
    success_deploys_curr = filter_deploys(deploys, "Success", current_window)

    mtbf_prev = mean_time_between_failures(failed_deploys_prev)
    mtbf_curr = mean_time_between_failures(failed_deploys_curr)
    fr_prev = failure_rate(failed_deploys_prev, success_deploys_prev)
    fr_curr = failure_rate(failed_deploys_curr, success_deploys_curr)

    return {
        "Change Failure Rate": fr_curr,
        "Change Failure Rate Change": relative_change(fr_curr, fr_prev),
        "Mean Time between Failures": mtbf_curr,
        "Mean Time between Failures Change": relative_change(mtbf_curr, mtbf_prev)
    }
def lambda_handler(event, context): client = FarosClient.from_event(event) query = """{ aws { iam { userDetail { data { farosAccountId farosRegionId userId userName attachedManagedPolicies { policyArn policyName } userPolicyList { policyName policyDocument } groups { data { groupId groupName attachedManagedPolicies { policyArn policyName } groupPolicyList { policyName policyDocument } } } } } } } }""" response = client.graphql_execute(query) users = response["aws"]["iam"]["userDetail"]["data"] return [u for u in users if has_full_star_policy(u)]
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    # First, fetch the query from the input key.
    query_key = event['params'].get('key')
    if query_key is None:
        raise RuntimeError("missing 'key' parameter")
    res = requests.get(QUERY_BASE_URL + query_key)
    if res.status_code == 404:
        # Nicer error if the query did not exist.
        raise RuntimeError('no such query: {!r}'.format(query_key))
    res.raise_for_status()  # Any other error type.
    query = res.json()['query']

    # Then, execute it and apply any JSONPath expression.
    data = client.graphql_execute(query['doc'])
    json_path = query.get('dataPath')
    if json_path:
        data = jsonpath_rw_ext.match(json_path, data)
    return data
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    query = '''{
      aws {
        ec2 {
          vpcEndpoint {
            data {
              farosAccountId
              farosRegionId
              vpcId
              policyDocument
            }
          }
        }
      }
    }'''

    response = client.graphql_execute(query)
    endpoints = response["aws"]["ec2"]["vpcEndpoint"]["data"]

    return [e for e in endpoints if check_statements(e["policyDocument"])]
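# check_statements is defined elsewhere; a minimal sketch, assuming the intent
# is to flag endpoint policies containing an allow-everything statement
# (Principal "*", Action "*", Resource "*") and that policyDocument is a JSON
# string:
import json


def check_statements(policy_document):
    """Hypothetical helper: True if the endpoint policy allows full access."""
    if not policy_document:
        return False
    document = json.loads(policy_document)
    statements = document.get("Statement", [])
    if isinstance(statements, dict):
        statements = [statements]
    for statement in statements:
        if (statement.get("Effect") == "Allow"
                and statement.get("Principal") in ("*", {"AWS": "*"})
                and statement.get("Action") in ("*", ["*"])
                and statement.get("Resource") in ("*", ["*"])):
            return True
    return False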