async def post(self):
    """
    POST /api/v2/requests

    Write the cached policy-request table, optionally filtered and — when the
    frontend passes ?markdown — rendered with markdown links for display.
    """
    query_args = {k: self.get_argument(k) for k in self.request.arguments}
    markdown = query_args.get("markdown")
    cache_key = config.get(
        "cache_all_policy_requests.redis_key", "ALL_POLICY_REQUESTS"
    )
    s3_bucket = config.get("cache_policy_requests.s3.bucket")
    s3_key = config.get("cache_policy_requests.s3.file")

    body = json.loads(self.request.body)
    filters = body.get("filters")
    sort = body.get("sort")
    limit = body.get("limit", 1000)

    tags = {"user": self.user}
    stats.count("RequestsHandler.post", tags=tags)
    log_data = {
        "function": "RequestsHandler.post",
        "user": self.user,
        "message": "Writing requests",
        "limit": limit,
        "filters": filters,
        "user-agent": self.request.headers.get("User-Agent"),
        "request_id": self.request_uuid,
    }
    log.debug(log_data)

    requests = await retrieve_json_data_from_redis_or_s3(
        cache_key, s3_bucket=s3_bucket, s3_key=s3_key
    )

    if not sort:
        # Default sort of requests is by request_time descending.
        requests = sorted(
            requests, key=lambda i: i.get("request_time", 0), reverse=True
        )

    if filters:
        try:
            # Bound filter evaluation so a pathological filter can't hang us.
            with Timeout(seconds=5):
                for filter_key, filter_value in filters.items():
                    requests = await filter_table(
                        filter_key, filter_value, requests
                    )
        except TimeoutError:
            self.write("Query took too long to run. Check your filter.")
            await self.finish()
            raise

    if not markdown:
        requests_to_write = requests[:limit]
    else:
        requests_to_write = []
        for req in requests[:limit]:
            # ARN format: arn:partition:service:region:account-id:resource
            arn_parts = req["arn"].split(":")
            resource_name = arn_parts[5]
            if "/" in resource_name:
                resource_name = resource_name.split("/")[-1]
            service_type = arn_parts[2]
            region = arn_parts[3]
            account_id = arn_parts[4]
            try:
                url = await get_url_for_resource(
                    req["arn"],
                    service_type,
                    account_id,
                    region,
                    resource_name,
                )
            except ResourceNotFound:
                # No known console URL for this resource type; emit plain text.
                url = None
            # Convert request_id and role ARN to link
            if req.get("version") == "2":
                req["request_id"] = (
                    f"[{req['request_id']}](/policies/request/{req['request_id']})"
                )
            else:
                # Legacy support for V1 requests. Pending removal.
                req["request_id"] = (
                    f"[{req['request_id']}](/policies/request_v1/{req['request_id']})"
                )
            if url:
                req["arn"] = f"[{req['arn']}]({url})"
            requests_to_write.append(req)

    self.write(json.dumps(requests_to_write))
    return
async def post(self):
    """
    POST /api/v2/requests

    Write the cached policy-request table as a DataTableResponse, optionally
    filtered, with markdown links for the request and its principal when the
    frontend passes ?markdown.
    """
    query_args = {k: self.get_argument(k) for k in self.request.arguments}
    markdown = query_args.get("markdown")
    cache_key = config.get(
        "cache_all_policy_requests.redis_key", "ALL_POLICY_REQUESTS"
    )
    s3_bucket = config.get("cache_policy_requests.s3.bucket")
    s3_key = config.get(
        "cache_policy_requests.s3.file",
        "policy_requests/all_policy_requests_v1.json.gz",
    )

    body = json.loads(self.request.body)
    filters = body.get("filters")
    # TODO: Add server-side sorting
    # sort = body.get("sort")
    limit = body.get("limit", 1000)

    tags = {"user": self.user}
    stats.count("RequestsHandler.post", tags=tags)
    log_data = {
        "function": "RequestsHandler.post",
        "user": self.user,
        "message": "Writing requests",
        "limit": limit,
        "filters": filters,
        "user-agent": self.request.headers.get("User-Agent"),
        "request_id": self.request_uuid,
    }
    log.debug(log_data)

    requests = await retrieve_json_data_from_redis_or_s3(
        cache_key, s3_bucket=s3_bucket, s3_key=s3_key
    )
    total_count = len(requests)

    if filters:
        try:
            # Bound filter evaluation so a pathological filter can't hang us.
            with Timeout(seconds=5):
                for filter_key, filter_value in filters.items():
                    requests = await filter_table(
                        filter_key, filter_value, requests
                    )
        except TimeoutError:
            self.write("Query took too long to run. Check your filter.")
            await self.finish()
            raise

    if not markdown:
        requests_to_write = requests[:limit]
    else:
        requests_to_write = []
        for req in requests[:limit]:
            principal = req.get("principal", {})
            principal_arn = principal.get("principal_arn", "")
            url = principal.get("resource_url", "")
            resource_name = principal_arn
            if "/" in resource_name:
                resource_name = resource_name.split("/")[-1]
            if not resource_name:
                resource_name = principal.get("resource_identifier")

            # Only attempt URL resolution for a well-formed 6-part ARN when
            # the cached entry didn't already carry a resource_url.
            if principal_arn and principal_arn.count(":") == 5 and not url:
                arn_parts = principal_arn.split(":")
                service_type = arn_parts[2]
                region = arn_parts[3]
                account_id = arn_parts[4]
                if principal.get("principal_arn"):
                    try:
                        url = await get_url_for_resource(
                            principal_arn,
                            service_type,
                            account_id,
                            region,
                            resource_name,
                        )
                    except ResourceNotFound:
                        # No known console URL; leave url falsy.
                        pass

            # Convert request_id and role ARN to link
            request_url = req.get("extended_request", {}).get("request_url")
            if not request_url:
                request_url = f"/policies/request/{req['request_id']}"
            req["request_id"] = f"[{req['request_id']}]({request_url})"
            if url:
                req["arn"] = f"[{principal_arn or resource_name}]({url})"
            requests_to_write.append(req)

    filtered_count = len(requests_to_write)
    res = DataTableResponse(
        totalCount=total_count,
        filteredCount=filtered_count,
        data=requests_to_write,
    )
    self.write(res.json())
    return
async def post(self):
    """
    POST /api/v2/policies

    Write the cached policies table, optionally filtered, and — when the
    frontend passes ?markdown — rendered with markdown links for the resource
    ARN and its template source.

    Raises:
        TimeoutError: re-raised after responding, if filter evaluation
            exceeds 5 seconds.
    """
    arguments = {k: self.get_argument(k) for k in self.request.arguments}
    markdown = arguments.get("markdown")
    arguments = json.loads(self.request.body)
    filters = arguments.get("filters")
    limit = arguments.get("limit", 1000)
    tags = {"user": self.user}
    stats.count("PoliciesHandler.post", tags=tags)
    log_data = {
        "function": "PoliciesHandler.post",
        "user": self.user,
        "message": "Writing policies",
        "limit": limit,
        "filters": filters,
        "user-agent": self.request.headers.get("User-Agent"),
        "request_id": self.request_uuid,
    }
    log.debug(log_data)
    policies = await retrieve_json_data_from_redis_or_s3(
        redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
        s3_bucket=config.get("cache_policies_table_details.s3.bucket"),
        s3_key=config.get("cache_policies_table_details.s3.file"),
        default=[],
    )
    if filters:
        try:
            # Bound filter evaluation so a pathological filter can't hang us.
            with Timeout(seconds=5):
                for filter_key, filter_value in filters.items():
                    policies = await filter_table(
                        filter_key, filter_value, policies
                    )
        except TimeoutError:
            self.write("Query took too long to run. Check your filter.")
            await self.finish()
            raise
    if markdown:
        policies_to_write = []
        for policy in policies[0:limit]:
            arn = policy["arn"]
            # ARN format: arn:partition:service:region:account-id:resource
            arn_parts = arn.split(":")
            resource_name = arn_parts[5]
            if "/" in resource_name:
                resource_name = resource_name.split("/")[-1]
            region = arn_parts[3]
            # Consistency/robustness fix: get_url_for_resource raises
            # ResourceNotFound for resource types without a console URL; the
            # requests handlers treat that as "no link" rather than failing
            # the entire response. Do the same here.
            try:
                url = await get_url_for_resource(
                    arn,
                    policy["technology"],
                    policy["account_id"],
                    region,
                    resource_name,
                )
            except ResourceNotFound:
                url = None
            if url:
                policy["arn"] = f"[{arn}]({url})"
            if not policy.get("templated"):
                policy["templated"] = "N/A"
            elif "/" in policy["templated"]:
                # Show only the template file name, linked to its full path.
                link_name = policy["templated"].split("/")[-1]
                policy["templated"] = f"[{link_name}]({policy['templated']})"
            policies_to_write.append(policy)
    else:
        policies_to_write = policies[0:limit]
    self.write(json.dumps(policies_to_write))
    return
)
# NOTE(review): the statement closed by ")" above opens before this chunk —
# presumably the redislite db-path setup; confirm upstream.
# redislite provides an embedded Redis reachable over a unix socket, used
# here as both the Celery broker and result backend (separate virtual hosts).
redislite_client = redislite.Redis(redislite_db_path)
redislite_socket_path = f"redis+socket://{redislite_client.socket_file}"
app = Celery(
    "tasks",
    broker=f"{redislite_socket_path}?virtual_host=1",
    backend=f"{redislite_socket_path}?virtual_host=2",
)
# Task results expire after 60s by default; prefetch and late-ack are
# configurable per deployment.
app.conf.result_expires = config.get("celery.result_expires", 60)
app.conf.worker_prefetch_multiplier = config.get(
    "celery.worker_prefetch_multiplier", 4
)
app.conf.task_acks_late = config.get("celery.task_acks_late", True)

if config.get("celery.purge") and not config.get("redis.use_redislite"):
    # Useful to clear celery queue in development
    with Timeout(
        seconds=5, error_message="Timeout: Are you sure Redis is running?"
    ):
        app.control.purge()


@app.task(soft_time_limit=600)
def cache_application_information():
    """
    This task retrieves application information from configuration. You may
    want to override this function to utilize your organization's CI/CD
    pipeline for this information.

    :return:
    """
    # Map each configured application name to the roles it declares.
    apps_to_roles = {}
    for k, v in config.get("application_settings", {}).items():
        apps_to_roles[k] = v.get("roles", [])

    # Persist the mapping to Redis; the call continues beyond this chunk.
    red.set(