"Sid": "AWSCloudTrailPutLogEvents", "Effect": "Allow", "Action": ["logs:PutLogEvents"], "Resource": f"{resource}" }] }) # build resource string for iam_role_generate function resource = Output.concat("arn:aws:logs:", region, ":", account_id, ":log-group:", cloudwatch_log.name, ":log-stream:", log_stream.name, "*") # Apply policy to role role_policy = iam.RolePolicy("cloudwatch_log_stream", role=role.id, policy=resource.apply(iam_role_generate)) # Create cloudtrail for s3 and write to s3 bucket trail_s3 = cloudtrail.Trail('CloudTrail_logging_for_s3', opts=ResourceOptions(depends_on=[bucket_policy]), cloud_watch_logs_group_arn=cloudwatch_log.arn, cloud_watch_logs_role_arn=role.arn, enable_logging=True, enable_log_file_validation=True, name="Cloudtrail_s3", s3_bucket_name=bucket.id, s3_key_prefix='prefix', is_multi_region_trail=True, event_selectors=[{ "dataResources": [{
def __init__(self, name, opts=None):
    """Provision a Pinpoint analytics pipeline.

    Pinpoint events are streamed through a Kinesis Firehose delivery stream
    into an S3 bucket; the component exports the bucket/stream identifiers.

    :param name: prefix used in the logical names of all child resources.
    :param opts: optional ResourceOptions applied to the component itself.
    """
    super().__init__("nuage:aws:Analytics", name, None, opts)
    account_id = get_caller_identity().account_id
    region = config.region
    # Destination bucket for the Firehose extended-S3 delivery.
    bucket = s3.Bucket(f"{name}Bucket")
    firehose_role = iam.Role(
        f"{name}FirehoseRole",
        assume_role_policy=get_firehose_role_trust_policy_document(account_id),
    )
    delivery_stream = kinesis.FirehoseDeliveryStream(
        f"{name}DeliveryStream",
        destination="extended_s3",
        # NOTE(review): keys mix camelCase ("bucketArn", "compressionFormat")
        # and snake_case ("role_arn"); Pulumi accepts both spellings, but
        # one convention would be cleaner.
        extended_s3_configuration={
            "bucketArn": bucket.arn,
            "role_arn": firehose_role.arn,
            "compressionFormat": "GZIP",
        },
        opts=ResourceOptions(depends_on=[bucket, firehose_role]),
    )
    # Policy is built from resolved Outputs, hence the .apply(json.dumps).
    firehose_role_policy = iam.RolePolicy(
        f"{name}DeliveryStreamPolicy",
        role=firehose_role.name,
        policy=get_firehose_role_policy_document(
            region, account_id, bucket.arn, delivery_stream.name
        ).apply(json.dumps),
    )
    pinpoint_app = pinpoint.App(f"{name}PinpointApp")
    # Role that lets Pinpoint write events into the delivery stream.
    pinpoint_stream_role = iam.Role(
        f"{name}PinpointStreamRole",
        assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
    )
    pinpoint_stream_role_policy = iam.RolePolicy(
        f"{name}PinpointStreamPolicy",
        role=pinpoint_stream_role.name,
        policy=get_pinpoint_stream_role_policy_document(
            region, account_id, delivery_stream.name, pinpoint_app.application_id
        ).apply(json.dumps),
        opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
    )
    # IAM roles can take time to propagate so we have to add an artificial delay
    # (10 seconds) before wiring the event stream to the role.
    pinpoint_stream_role_delay = Delay(
        "EventStreamRoleDelay",
        10,
        opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
    )
    pinpoint_stream = pinpoint.EventStream(
        f"{name}PinpointEventStream",
        application_id=pinpoint_app.application_id,
        destination_stream_arn=delivery_stream.arn,
        role_arn=pinpoint_stream_role.arn,
        opts=ResourceOptions(
            depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay,]
        ),
    )
    # Exported outputs consumed by the enclosing stack.
    self.set_outputs(
        {
            "bucket_name": bucket.id,
            "delivery_stream_name": delivery_stream.name,
            "destination_stream_arn": delivery_stream.arn,
            "pinpoint_application_name": pinpoint_app.name,
            "pinpoint_application_id": pinpoint_app.application_id,
        }
    )
"Action": "sts:AssumeRole", "Principal": { "Service": "lambda.amazonaws.com" }, "Effect": "Allow", "Sid": "" } ] }""" ) # Give lambda fuctions access to S3, DynamoDB and Rekognition lambda_role_policy = iam.RolePolicy('lambdaRolePolicy', role=lambda_role.id, policy="""{ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", "s3:*", "dynamodb:PutItem", "dynamodb:Scan", "rekognition:*" ], "Resource": "*" }] }""" )
"Action": "sts:AssumeRole", "Principal": { "Service": "lambda.amazonaws.com" }, "Effect": "Allow", "Sid": "" } ] }""") lambda_role_policy = iam.RolePolicy('lambdaRolePolicy', role=lambda_role.id, policy="""{ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "arn:aws:logs:*:*:*" }] }""") lamdba_vpc_enabled = iam.RolePolicyAttachment( 'lamdba_vpc_enabled', role=lambda_role.id, policy_arn= "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole")
{ "Sid": "AllowKms", "Effect": "Allow", "Action": "kms:Decrypt", "Resource": f"{args[2]}", }, { "Sid": "AllowSQS", "Effect": "Allow", "Action": "sqs:*", "Resource": f"{args[3]}", }, ], })) iam.RolePolicy(f"{MODULE_NAME}-role-policy", role=role.id, policy=policy) iam.RolePolicyAttachment( f"{MODULE_NAME}-xray", policy_arn="arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess", role=role.id, ) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.6", handler="lambda_handler.twitch_chat_bot", s3_key=config.require("artifact_name"), s3_bucket="morgue-artifacts", tracing_config={"mode": "Active"},
], } ), ) # Create a policy for the cicd role cicd_policy = iam.RolePolicy( resource_name="CICD-sample-policy", policy=json.dumps( { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": ["s3:GetBucketLocation", "s3:ListAllMyBuckets"], "Resource": "arn:aws:s3:::*", }, {"Effect": "Allow", "Action": "s3:*", "Resource": ["*"], }, {"Effect": "Allow", "Action": "lambda:*", "Resource": "*", }, {"Effect": "Allow", "Action": "logs:*", "Resource": "*", }, ], } ), role=cicd_role.name ) # Create a build project prebuild_project = codebuild.Project( resource_name="sample", name="sample", build_timeout=30, # In minutes
bucket=LAMBDA_BUCKET, source=FileAsset(LAMBDA_PACKAGE), content_type=mime_type) lambda_role = default_iam.default_iam_role(service_naming_convention, lambda_name) iam.RolePolicy(service_naming_convention + '-cleanup-old-amis-policy', role=lambda_role.id, policy="""{ "Version": "2012-10-17", "Statement": [ { "Sid": "AllowCleanupAMIs", "Effect": "Allow", "Action": [ "ec2:DescribeImages", "ec2:DescribeImageAttribute", "ec2:DeregisterImage" ], "Resource": "*" } ] }""") cleanup_old_amis = lambda_.Function( service_naming_convention + '_' + lambda_name, s3_bucket=LAMBDA_BUCKET, s3_key=LAMBDA_VERSION + '/' + LAMBDA_PACKAGE, handler="delete_old_amis.handler", runtime=runtime,
def __init__(self,
             name,
             datalake_bucket: s3.Bucket = None,
             datalake_raw_path: str = None,
             fileproc_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             package_dir: str = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    """Provision the glue-notification Lambda and its IAM wiring.

    The function copies files from the fileproc bucket to the datalake raw
    bucket and triggers glue jobs (per its description below); this component
    creates its execution role, policies, the function, and the S3 invoke
    permission.

    :param name: prefix for the logical names of all child resources.
    :param datalake_bucket: destination datalake bucket.
    :param datalake_raw_path: raw-zone path, exposed to the code via env var.
    :param fileproc_bucket: source bucket; also granted invoke permission.
    :param managed_policy_arns: optional managed policy ARNs to attach.
    :param package_dir: directory containing the built lambda package.
    :param tags: extra resource tags, merged with the mandatory pii tag.
    :param opts: optional ResourceOptions for the component.
    """
    super().__init__('hca:GlueNotificationLambda', name, None, opts)
    # Copy caller tags (never mutate the caller's dict) and force the
    # data-classification tag on everything this component creates.
    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})
    role = iam.Role(
        f"{name}-role",
        path="/lambda/",
        description=f"role for glue notification lambda",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }]
        }),
        force_detach_policies=True,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))
    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                # NOTE(review): the Role resource is passed directly; Pulumi
                # resolves a resource input to its id — confirm this matches
                # the provider's expected role name here.
                role=role,
                opts=pulumi.ResourceOptions(parent=self))
    # Inline policy built from both bucket names once they resolve.
    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=pulumi.Output.all(datalake_bucket.bucket, fileproc_bucket.bucket).apply(
            lambda b: inline_policy(b[0], b[1])),
        opts=pulumi.ResourceOptions(parent=self))
    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description=
        'copy files from fileproc bucket to datalake raw bucket and trigger glue jobs',
        handler='glue_notification.main',
        environment={
            'variables': {
                # NOTE(review): the Bucket resource is used as the value;
                # presumably it resolves to the bucket id/name — verify the
                # lambda receives a string, not a serialized object.
                'S3_DATALAKE_BUCKET': datalake_bucket,
                'S3_RAW_PATH': datalake_raw_path,
                'PULUMI_STACK': pulumi.get_stack(),
                'PULUMI_PROJECT': pulumi.get_project()
            }
        },
        memory_size=256,
        timeout=60,
        code=pulumi.AssetArchive({
            # use lambda-glue-notification created with build.py
            '.': pulumi.FileArchive(package_dir),
        }),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))
    # Allow S3 (events from the fileproc bucket) to invoke the function.
    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=fileproc_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
"MyAnalyticsIdentityPool", allow_unauthenticated_identities=True, identity_pool_name="MyAnalyticsIdentityPool", ) unauthenticated_role = iam.Role( "MyAnalyticsUnauthRole", assume_role_policy=get_unauthenticated_role_trust_policy_document( identity_pool.id), ) unauthenticated_role_policy = iam.RolePolicy( f"MyAnalyticsUnauthRolePolicy", role=unauthenticated_role, policy=get_unauthenticated_role_policy_document( config.region, get_caller_identity().account_id, analytics.pinpoint_application_id, ).apply(json.dumps), opts=ResourceOptions(depends_on=[analytics, unauthenticated_role]), ) cognito.IdentityPoolRoleAttachment( "MyAnalyticsIDPoolRoleAttach", identity_pool_id=identity_pool.id, roles={"unauthenticated": unauthenticated_role.arn}, ) pulumi.export("bucket_name", analytics.bucket_name) pulumi.export("delivery_stream_name", analytics.delivery_stream_name) pulumi.export("delivery_stream_arn", analytics.destination_stream_arn) pulumi.export("pinpoint_application_name", analytics.pinpoint_application_name)
def generate_dynamo_data_source(type_name):
    """Create the DynamoDB-backed AppSync plumbing for one GraphQL type.

    Provisions a Dynamo table keyed solely by a hash key called `id`, an
    IAM role AppSync can assume, an inline policy scoped to that table, the
    AppSync data source, and the resolvers for the type.

    :param type_name The name of the GraphQL type. This is the identifier
        which appears after the `type` keyword in the schema.
    """
    # Table name is namespaced by stack so multiple stacks can coexist.
    dynamo_table = dynamodb.Table(
        f"{stack_name}_{type_name}_table",
        name=f"{stack_name}.{type_name}",
        hash_key="id",
        attributes=[{
            "name": "id",
            "type": "S"
        }],
        #stream_view_type="NEW_AND_OLD_IMAGES",
        billing_mode="PAY_PER_REQUEST")

    # Trust policy: only the AppSync service may assume this role.
    ds_role = iam.Role(f"{stack_name}_{type_name}_role",
                       assume_role_policy="""{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "appsync.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}""")

    # Inline policy limited to the table (and its indexes/streams via /*);
    # built once the table name Output resolves.
    ds_role_policy = iam.RolePolicy(
        f"{stack_name}_{type_name}_role_policy",
        role=ds_role.name,
        name="MyDynamoDBAccess",
        policy=dynamo_table.name.apply(lambda tbl: f"""{{
    "Version": "2012-10-17",
    "Statement": [
        {{
            "Effect": "Allow",
            "Action": [
                "dynamodb:BatchGetItem",
                "dynamodb:BatchWriteItem",
                "dynamodb:PutItem",
                "dynamodb:DeleteItem",
                "dynamodb:GetItem",
                "dynamodb:Scan",
                "dynamodb:Query",
                "dynamodb:UpdateItem"
            ],
            "Resource": [
                "arn:aws:dynamodb:{aws_region}:{account_id}:table/{tbl}",
                "arn:aws:dynamodb:{aws_region}:{account_id}:table/{tbl}/*"
            ]
        }}
    ]
}}"""))

    dynamo_source = appsync.DataSource(
        f"{stack_name}_{type_name}_data_source",
        api_id=graphql_api.id,
        name=f"{type_name}TableDataSource",
        type="AMAZON_DYNAMODB",
        service_role_arn=ds_role.arn,
        dynamodb_config={"table_name": dynamo_table.name},
        opts=ResourceOptions(depends_on=[ds_role]))

    type_resolvers = generate_resolvers(type_name, dynamo_source)

    return {
        "table": dynamo_table,
        "data_source_iam_role": ds_role,
        "data_source_iam_role_policy": ds_role_policy,
        "data_source": dynamo_source,
        "resolvers": type_resolvers
    }
def create_lambda_execution_roles(region, account):
    """Create the IAM execution role for the send-message Lambda.

    :param region: region object whose ``name`` is interpolated into ARNs.
    :param account: AWS account id interpolated into ARNs.
    :returns: dict with key ``"role"`` holding the created iam.Role.
    """
    # Trust policy: allow the Lambda service to assume this role.
    assume_doc = iam.get_policy_document(statements=[{
        "actions": ["sts:AssumeRole"],
        "principals": [{
            "identifiers": ["lambda.amazonaws.com"],
            "type": "Service",
        }],
    }])

    execution_role = iam.Role(
        "sendMessagelambda",
        assume_role_policy=assume_doc.json)

    # NOTE(review): the final "ec2:*"/"*" statement subsumes the narrower
    # ec2 grant above, and the logs actions appear in several statements —
    # consider tightening this policy. Left as-is to preserve behavior.
    permissions_doc = f"""{{
    "Version": "2012-10-17",
    "Statement": [
        {{
            "Effect": "Allow",
            "Action": [
                "ec2:CreateNetworkInterface",
                "logs:CreateLogStream",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
                "logs:CreateLogGroup",
                "logs:PutLogEvents"
            ],
            "Resource": "*"
        }},
        {{
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
            "Resource": "arn:aws:logs:{region.name}:{account}:log-group:*"
        }},
        {{
            "Effect": "Allow",
            "Action": "logs:CreateLogGroup",
            "Resource": "arn:aws:logs:{region.name}:{account}:*"
        }},
        {{
            "Effect": "Allow",
            "Action": [
                "execute-api:ManageConnections",
                "execute-api:Invoke"
            ],
            "Resource": [
                "arn:aws:execute-api:{region.name}:{account}:*"
            ]
        }},
        {{
            "Action": "ec2:*",
            "Effect": "Allow",
            "Resource": "*"
        }}
    ]
}}
"""
    # Inline policy attached directly to the role.
    iam.RolePolicy(
        "RolePolicyAttachment",
        role=execution_role.id,
        policy=permissions_doc)

    return {"role": execution_role}
"Action": "sts:AssumeRole", }], }), ) def sfn_policy(): return json.dumps({ "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Action": "lambda:InvokeFunction", "Resource": "*", }, { "Sid": "", "Effect": "Allow", "Action": "sts:AssumeRole", "Resource": f"{pulumi_config.require('roles')}", }, ], }) sfn_role_policy = iam.RolePolicy("ce-sfn-role-policy", role=sfn_role.id, policy=sfn_policy())
"Statement": [{ "Action": "sts:AssumeRole", "Principal": { "Service": "ec2.amazonaws.com" }, "Effect": "Allow", "Sid": "" }] })) role_policy = iam.RolePolicy("myrolepolicy", role=role.id, policy=json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": ["ec2:Describe*"], "Effect": "Allow", "Resource": "*" }] })) policy = iam.Policy("mypolicy", policy=json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": ["ec2:Describe*"], "Effect": "Allow", "Resource": "*" }]
"Service": "lambda.amazonaws.com", }, "Effect": "Allow", "Sid": "", }] })) api_lambda_role_policy = iam.RolePolicy('shaht-lambda-api-iam-policy', role=api_lambda_role.id, policy=json.dumps({ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "*" }] })) ######### LAYERS ########### artifacts_bucket = s3.Bucket('artifacts') # Upload ffmpeg library to bucket api_airtable_layer_zip = s3.BucketObject( 'hello',
"Version": "2012-10-17", "Statement": [{ "Action": "sts:AssumeRole", "Principal": { "Service": "apigateway.amazonaws.com" }, "Effect": "Allow", "Sid": "ApigatewayAssumeRole", }], } role = iam.Role(MODULE_NAME, assume_role_policy=json.dumps(API_GATEWAY_AUTH_INVOCATION)) iam.RolePolicy(f"{MODULE_NAME}-role-policy", role=role.id, policy=authorizer_role_policy()) # resource "aws_api_gateway_authorizer" "demo" { # name = "demo" # rest_api_id = "${aws_api_gateway_rest_api.demo.id}" # authorizer_uri = "${aws_lambda_function.authorizer.invoke_arn}" # authorizer_credentials = "${aws_iam_role.invocation_role.arn}" # } # (Optional, required for type TOKEN/REQUEST) The authorizer's Uniform Resource Identifier (URI). # This must be a well-formed Lambda function URI in the form of # arn:aws:apigateway:{region}:lambda:path/{service_api}, # # arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations # error: Plan apply failed: authorizer_uri must be set non-empty when authorizer type is REQUEST