def _create_compute_iam_roles(self, name):
    # According to AWS docs, this trust policy is required for the masters & the agents
    # TODO: can we curl for this & check if it's different? use the updated one & log if different.
    # Note: the multi-line string requires the open bracket here. Adding a newline results in a malformed policy doc
    mrp = """{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "Service": "eks.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
            }
        ]
    }"""

    # Trust policy for the worker role
    wrp = """{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "Service": "ec2.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
            }
        ]
    }"""

    policy_arn_string = "arn:aws:iam::aws:policy/"

    eks_master_role = iam.Role("eks-service-role",
                               name="%s-master-role" % name,
                               description="role for eks service",
                               assume_role_policy=mrp,
                               __opts__=ResourceOptions(parent=self))

    eks_worker_role = iam.Role("eks-service-worker-role",
                               name="%s-worker-role" % name,
                               description="role for eks worker nodes",
                               assume_role_policy=wrp,
                               __opts__=ResourceOptions(parent=self))

    eks_worker_instance_profile = iam.InstanceProfile("eks_worker_instance_profile",
                                                      name="%s-instance-profile" % name,
                                                      role=eks_worker_role.id,
                                                      __opts__=ResourceOptions(parent=self))

    # attach required policies to the master plane
    d1 = iam.PolicyAttachment("policy-AmazonEKSClusterPolicy",
                              policy_arn="%sAmazonEKSClusterPolicy" % policy_arn_string,
                              roles=[eks_master_role.id],
                              __opts__=ResourceOptions(parent=self))
    d2 = iam.PolicyAttachment("policy-AmazonEKSServicePolicy",
                              policy_arn="%sAmazonEKSServicePolicy" % policy_arn_string,
                              roles=[eks_master_role.id],
                              __opts__=ResourceOptions(parent=self))

    # attach required policies to the worker nodes
    iam.PolicyAttachment("policy-AmazonEKSWorkerNodePolicy",
                         policy_arn="%sAmazonEKSWorkerNodePolicy" % policy_arn_string,
                         roles=[eks_worker_role.id],
                         __opts__=ResourceOptions(parent=self))
    iam.PolicyAttachment("policy-AmazonEKS_CNI_Policy",
                         policy_arn="%sAmazonEKS_CNI_Policy" % policy_arn_string,
                         roles=[eks_worker_role.id],
                         __opts__=ResourceOptions(parent=self))
    iam.PolicyAttachment("policy-AmazonEC2ContainerRegistryReadOnly",
                         policy_arn="%sAmazonEC2ContainerRegistryReadOnly" % policy_arn_string,
                         roles=[eks_worker_role.id],
                         __opts__=ResourceOptions(parent=self))

    self.eks_master_role = eks_master_role.arn
    self.eks_worker_role = eks_worker_role
    self.cluster_role_attachment_dependencies = [d1, d2]
    self.eks_worker_instance_profile = eks_worker_instance_profile.name
def default_iam_role(service_naming_convention, lambda_name):
    current = get_partition()
    lambda_role = iam.Role(
        lambda_name + '_' + service_naming_convention,
        assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "sts:AssumeRole",
                    "Principal": {
                        "Service": "lambda.amazonaws.com"
                    },
                    "Effect": "Allow",
                    "Sid": ""
                }
            ]
        }"""
    )
    iam.RolePolicyAttachment(
        lambda_name + '_' + service_naming_convention + '_default_policy',
        role=lambda_role.name,
        policy_arn=f"arn:{current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
    )
    return lambda_role
def enableFlowLoggingToCloudWatchLogs(self, trafficType: Input[str]):
    """
    Enable VPC flow logging to CloudWatch Logs, for the specified traffic type

    :param self: VPC instance
    :param trafficType: The traffic type to log: "ALL", "ACCEPT" or "REJECT"
    :return: None
    """
    self.flow_logs_role = iam.Role(
        f"{self.name}-flow-logs-role",
        tags={**self.base_tags, "Name": f"{self.description} VPC Flow Logs"},
        assume_role_policy=assume_role_policy_for_principal({
            "Service": "vpc-flow-logs.amazonaws.com",
        }),
        opts=pulumi.ResourceOptions(parent=self.vpc))

    self.flow_logs_group = cloudwatch.LogGroup(
        f"{self.name}-vpc-flow-logs",
        tags={**self.base_tags, "Name": f"{self.description} VPC Flow Logs"},
        opts=pulumi.ResourceOptions(parent=self.vpc))

    iam.RolePolicy(
        f"{self.name}-flow-log-policy",
        name="vpc-flow-logs",
        role=self.flow_logs_role.id,
        policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Resource": "*",
                "Action": [
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents",
                    "logs:DescribeLogGroups",
                    "logs:DescribeLogStreams",
                ],
            }],
        }),
        opts=pulumi.ResourceOptions(parent=self.flow_logs_role))

    ec2.FlowLog(
        f"{self.name}-flow-logs",
        log_destination=self.flow_logs_group.arn,
        iam_role_arn=self.flow_logs_role.arn,
        vpc_id=self.vpc.id,
        traffic_type=trafficType,
        opts=pulumi.ResourceOptions(parent=self.flow_logs_role))
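The snippet above calls an assume_role_policy_for_principal() helper whose body is not shown. A minimal sketch, assuming the helper simply wraps the given principal in a standard sts:AssumeRole trust document (the exact original implementation may differ):

# Hypothetical helper matching the call site above; not part of the original code.
import json
from typing import Dict

def assume_role_policy_for_principal(principal: Dict[str, str]) -> str:
    """Return a trust policy JSON document allowing `principal` to assume the role."""
    return json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": principal,
            "Action": "sts:AssumeRole",
        }],
    })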
def add_lambda(self, archive_path: str, sns_topic: sns.Topic):
    """
    Create a Lambda function with SNS invoke permission.
    """
    lambda_role = iam.Role(
        resource_name=format_resource_name("lambda-role"),
        assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "sts:AssumeRole",
                    "Principal": {
                        "Service": "lambda.amazonaws.com"
                    },
                    "Effect": "Allow",
                    "Sid": ""
                }
            ]
        }""")

    lambda_role_policy = iam.RolePolicy(
        resource_name=format_resource_name("lambda-policy"),
        role=lambda_role.id,
        policy="""{
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents"
                ],
                "Resource": "arn:aws:logs:*:*:*"
            }]
        }""")

    mail_processor_function = lambda_.Function(
        resource_name=format_resource_name("function"),
        role=lambda_role.arn,
        runtime="python3.7",
        handler="handler.lambda_handler",
        code=archive_path,
        source_code_hash=filebase64sha256(archive_path))

    allow_sns = lambda_.Permission(
        resource_name=format_resource_name("permissions"),
        action="lambda:InvokeFunction",
        function=mail_processor_function.name,
        principal="sns.amazonaws.com",
        source_arn=sns_topic.arn)

    return mail_processor_function
def create_iam_resources(self):
    # We create a master role, attach some policies to it, and allow all egress.
    self.master_role = iam.Role(
        self.cluster_name + "-master-role",
        assume_role_policy=ASSUME_ROLE_POLICY_DOC,
        opts=pulumi.ResourceOptions(parent=self))

    iam.RolePolicyAttachment(
        self.cluster_name + "-master-AmazonEKSClusterPolicy",
        policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
        role=self.master_role,
        opts=pulumi.ResourceOptions(parent=self))

    iam.RolePolicyAttachment(
        self.cluster_name + "-master-AmazonEKSServicePolicy",
        policy_arn="arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
        role=self.master_role,
        opts=pulumi.ResourceOptions(parent=self))
def generate_role(name, resources, **ropts):
    role = iam.Role(
        f'{name}',
        assume_role_policy={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com",
                },
            }],
        },
        **ropts,
    )

    iam.RolePolicyAttachment(
        f'{name}-base',
        role=role,
        policy_arn=BASIC_POLICY.arn,
        **opts(parent=role))

    if resources:
        iam.RolePolicy(
            f'{name}-policy',
            role=role,
            policy={
                "Version": "2012-10-17",
                # FIXME: Reduce this
                "Statement": [{
                    "Effect": "Allow",
                    "Action": "*",  # FIXME: More reasonable permissions
                    "Resource": pulumi.Output.all(
                        *[res[0].arn for res in resources.values()]),
                }],
            },
            **opts(parent=role))

    return role
table_name = job_dependency['target_table']
if modulename in table_catalog:
    table_catalog[modulename].add(table_name)
else:
    table_catalog[modulename] = set([table_name])

# create iam role for crawlers to assume
crawler_role = iam.Role(
    'crawler-role',
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": "sts:AssumeRole",
            "Principal": {
                "Service": "glue.amazonaws.com"
            }
        }]
    }),
    description="datalake mart crawler",
    force_detach_policies=True,
    path='/glue/',
    name_prefix=f"{project}-{env}-mart-crawler")

# attach policies to crawler role
iam.RolePolicyAttachment('crawler-kms-policy',
                         role=crawler_role,
                         policy_arn=infra.policy_kms_full_usage.arn)
iam.RolePolicyAttachment('crawler-glue-service-policy',
                         role=crawler_role,
                         policy_arn=infra.policy_glue_service.arn)
lambda_package = LambdaPackage(
    name="example-test",
    layer=True,
    no_deploy=["pulumi", "pulumi_aws", "pulumi_docker"],
    exclude=["__main__.py"],
)

role = iam.Role(
    resource_name="role",
    description=f"Role used by Lambda to run the `{pulumi.get_project()}-{pulumi.get_stack()}` project",
    assume_role_policy=json.dumps(
        {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "",
                    "Effect": "Allow",
                    "Action": "sts:AssumeRole",
                    "Principal": {"Service": ["lambda.amazonaws.com"]},
                }
            ],
        }
    ),
)

# Attach the basic Lambda execution policy to our Role
iam.RolePolicyAttachment(
    resource_name="policy-attachment",
    policy_arn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
    role=role.name,
)
def createPolicy(provider: Provider):
    # Pass the supplied provider through, otherwise the argument is unused.
    return iam.Role(resource_name='my-policy',
                    assume_role_policy={"Version": "2012-10-17"},
                    opts=pulumi.ResourceOptions(provider=provider))
# Copyright 2016-2018, Pulumi Corporation. All rights reserved.

from pulumi_aws import config, iam

lambda_role = iam.Role('lambdaRole',
    assume_role_policy="""{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                },
                "Effect": "Allow",
                "Sid": ""
            }
        ]
    }"""
)

lambda_role_policy = iam.RolePolicy('lambdaRolePolicy',
    role=lambda_role.id,
    policy="""{
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
def __init__(self,
             name,
             scripts_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = [],
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    super().__init__('hca:ScriptArchiveLambda', name, None, opts)

    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})

    role = iam.Role(
        f"{name}-role",
        path="/lambda/",
        description=f"role for script archive lambda",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }]
        }),
        force_detach_policies=True,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                role=role,
                opts=pulumi.ResourceOptions(parent=self))

    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=scripts_bucket.bucket.apply(inline_policy),
        opts=pulumi.ResourceOptions(parent=self))

    print(
        f"archive function => {os.path.abspath(os.path.join(os.getcwd(), '../../src/lambdas/scripts_archive.py'))}"
    )

    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description='copy files from fileproc bucket to datalake raw bucket and trigger glue jobs',
        handler='index.main',
        memory_size=128,
        timeout=30,
        code=pulumi.AssetArchive({
            # NOTE use relative path from pulumi root
            'index.py': pulumi.FileAsset(
                os.path.abspath(
                    os.path.join(os.getcwd(), '../../src/lambdas/scripts_archive.py'))),
        }),
        #code=pulumi.FileAsset(os.path.abspath(os.path.join(os.getcwd(),'../../src/lambdas/scripts_archive.py'))),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=scripts_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
def generate_dynamo_data_source(self, graphql_api, type_name):
    """
    Generates a DynamoDB data source for the given GraphQL type. This includes the
    Dynamo table, the AppSync data source, a data source role, and the resolvers.

    NOTE: This function generates Dynamo tables with a hash key called `id`, but no
    other keys.

    :param type_name: The name of the GraphQL type. This is the identifier which
                      appears after the `type` keyword in the schema.
    """
    table = dynamodb.Table(
        f"{self.stack_name}_{type_name}_table",
        name=f"{self.stack_name}_{self.random_chars}.{type_name}",
        hash_key="id",
        attributes=[{"name": "id", "type": "S"}],
        # stream_view_type="NEW_AND_OLD_IMAGES",
        billing_mode="PAY_PER_REQUEST",
    )

    data_source_iam_role = iam.Role(
        f"{self.stack_name}_{type_name}_role",
        assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "appsync.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }""",
    )

    aws_region = config.region
    account_id = get_caller_identity().account_id

    data_source_iam_role_policy = iam.RolePolicy(
        f"{self.stack_name}_{type_name}_role_policy",
        role=data_source_iam_role.name,
        name="MyDynamoDBAccess",
        policy=table.name.apply(
            lambda table_name: f"""{{
                "Version": "2012-10-17",
                "Statement": [
                    {{
                        "Effect": "Allow",
                        "Action": [
                            "dynamodb:BatchGetItem",
                            "dynamodb:BatchWriteItem",
                            "dynamodb:PutItem",
                            "dynamodb:DeleteItem",
                            "dynamodb:GetItem",
                            "dynamodb:Scan",
                            "dynamodb:Query",
                            "dynamodb:UpdateItem"
                        ],
                        "Resource": [
                            "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}",
                            "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}/*"
                        ]
                    }}
                ]
            }}"""
        ),
    )

    data_source = appsync.DataSource(
        f"{self.stack_name}_{type_name}_data_source",
        api_id=graphql_api.id,
        name=f"{type_name}TableDataSource_{self.random_chars}",
        type="AMAZON_DYNAMODB",
        service_role_arn=data_source_iam_role.arn,
        dynamodb_config={"table_name": table.name},
        opts=ResourceOptions(depends_on=[data_source_iam_role]),
    )

    resolvers = self.generate_resolvers(graphql_api, type_name, data_source)

    return {
        "table": table,
        "data_source_iam_role": data_source_iam_role,
        "data_source_iam_role_policy": data_source_iam_role_policy,
        "data_source": data_source,
        "resolvers": resolvers,
    }
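For illustration, a hypothetical call site inside the same component could look like the sketch below; the GraphQL API resource, its schema, and the `Post` type name are invented for the example and are not part of the original code.

# Hypothetical usage of generate_dynamo_data_source(), assumed to live in another
# method of the same component class.
def create_post_data_source(self):
    graphql_api = appsync.GraphQLApi(
        f"{self.stack_name}_api",
        authentication_type="API_KEY",
        schema="type Post { id: ID! title: String } type Query { getPost(id: ID!): Post }",
    )
    # Creates the `Post` table, the AppSync data source backed by it, the data source
    # role and its DynamoDB policy, plus the resolvers for the type.
    return self.generate_dynamo_data_source(graphql_api, "Post")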
This is an example Pulumi program which creates a Nuage Analytics pipeline component,
as well as a Cognito Identity Pool which allows anonymous authentication. This stack
can be used to power the example website in the `example` folder.
"""

analytics = Analytics("MyAnalytics")

identity_pool = cognito.IdentityPool(
    "MyAnalyticsIdentityPool",
    allow_unauthenticated_identities=True,
    identity_pool_name="MyAnalyticsIdentityPool",
)

unauthenticated_role = iam.Role(
    "MyAnalyticsUnauthRole",
    assume_role_policy=get_unauthenticated_role_trust_policy_document(
        identity_pool.id),
)

unauthenticated_role_policy = iam.RolePolicy(
    f"MyAnalyticsUnauthRolePolicy",
    role=unauthenticated_role,
    policy=get_unauthenticated_role_policy_document(
        config.region,
        get_caller_identity().account_id,
        analytics.pinpoint_application_id,
    ).apply(json.dumps),
    opts=ResourceOptions(depends_on=[analytics, unauthenticated_role]),
)

cognito.IdentityPoolRoleAttachment(
name="Tenant", hash_key="id", attributes=[{ "name": "id", "type": "S" }], read_capacity=1, write_capacity=1) ## Create IAM role and policy wiring role = iam.Role("iam-role", assume_role_policy=json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": "sts:AssumeRole", "Principal": { "Service": "appsync.amazonaws.com" }, "Effect": "Allow", }] })) policy = iam.Policy( "iam-policy", policy=table.arn.apply(lambda arn: json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": ["dynamodb:PutItem", "dynamodb:GetItem"], "Effect": "Allow", "Resource": [arn]
import pulumi
import json

from modules.s3 import bucket
from modules.iam import LAMBDA_ASSUME_ROLE_POLICY
from modules.iam import CREATE_CW_LOGS_POLICY
from pulumi_aws import iam, lambda_
from pulumi_aws import cloudwatch
from modules.layers import dependency_layer

config = pulumi.Config()

MODULE_NAME = "morgue-stalker"

role = iam.Role(f"{MODULE_NAME}-role",
                assume_role_policy=json.dumps(LAMBDA_ASSUME_ROLE_POLICY))


def lambda_role_policy(bucket_arn):
    return json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            CREATE_CW_LOGS_POLICY,
            {
                "Effect": "Allow",
                "Action": ["s3:PutObject", "s3:ListObjectsV2"],
                "Resource": bucket_arn,
            },
            {
                "Effect": "Allow",
                "Action": ["s3:GetObject"],
import json

from pulumi_aws import sns, iam

from modules.iam import CREATE_CW_LOGS_POLICY
from modules.sqs import gods_queue, weapons_queue

role = iam.Role(
    f"topic-role",
    assume_role_policy="""{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "sns.amazonaws.com"
                },
                "Effect": "Allow",
                "Sid": ""
            }
        ]
    }""",
)

sns_topic = sns.Topic(
    f"gods-topic",
    sqs_failure_feedback_role_arn=role.arn,
    sqs_success_feedback_role_arn=role.arn,
)

weapons_topic = sns.Topic(
def __init__(
        self,
        name,
        vpc_environment: VPC,
        efs_environment: EFS,
        github_repo_name: Input[str],
        github_version_name: Input[str] = None,
        opts=None,
):
    super().__init__("nuage:aws:DevelopmentEnvironment:CodeBuild",
                     f"{name}CodebuildEnvironment", None, opts)

    # TODO pass this in - with a default?
    def get_codebuild_service_role_policy():
        return {
            "Version": "2012-10-17",
            "Statement": [{
                "Action": "*",
                "Effect": "Allow",
                "Resource": "*"
            }]
        }

    account_id = get_caller_identity().account_id

    # TODO add random chars on the end of default name to prevent conflicts
    project_name = f"{name}BuildDeploy"

    pulumi_token_param = ssm.Parameter(f"{name}PulumiAccessToken",
                                       type="SecureString",
                                       value="none")

    codebuild_vpc_policy = iam.Policy(
        f"{name}CodeBuildVpcPolicy",
        policy=get_codebuild_vpc_policy(
            account_id, vpc_environment.private_subnet.id).apply(json.dumps))

    codebuild_base_policy = iam.Policy(
        f"{name}CodeBuildBasePolicy",
        policy=json.dumps(get_codebuild_base_policy(account_id, project_name)))

    codebuild_service_role_policy = iam.Policy(
        f"{name}CodeBuildServiceRolePolicy",
        policy=json.dumps(get_codebuild_service_role_policy()))

    codebuild_service_role = iam.Role(
        f"{name}CodeBuildRole",
        assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "codebuild.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }""")

    codebuild_vpn_policy_attach = iam.PolicyAttachment(
        f"{name}CodeBuildVpnAttachment",
        policy_arn=codebuild_vpc_policy.arn,
        roles=[codebuild_service_role.name])

    codebuild_base_policy_attach = iam.PolicyAttachment(
        f"{name}CodeBuildBaseAttachment",
        policy_arn=codebuild_base_policy.arn,
        roles=[codebuild_service_role.name])

    codebuild_service_role_policy_attach = iam.PolicyAttachment(
        f"{name}CodeBuildServiceRoleAttachment",
        policy_arn=codebuild_service_role_policy.arn,
        roles=[codebuild_service_role.name])

    codebuild_project = codebuild.Project(
        f"{name}CodeBuildProject",
        description="Builds and deploys the stack",
        name=project_name,
        vpc_config={
            "vpc_id": vpc_environment.vpc.id,
            "subnets": [vpc_environment.private_subnet],
            "security_group_ids": [vpc_environment.security_group.id]
        },
        source={
            "type": "GITHUB",
            "location": github_repo_name
        },
        source_version=github_version_name,
        artifacts={"type": "NO_ARTIFACTS"},
        environment={
            "image": "aws/codebuild/amazonlinux2-x86_64-standard:2.0",
            "privileged_mode": True,
            "type": "LINUX_CONTAINER",
            "compute_type": "BUILD_GENERAL1_SMALL",
            "environment_variables": [{
                "name": "PULUMI_ACCESS_TOKEN",
                "type": "PARAMETER_STORE",
                "value": pulumi_token_param.name
            }, {
                "name": "FILESYSTEM_ID",
                "type": "PLAINTEXT",
                "value": efs_environment.file_system_id
            }]
        },
        service_role=codebuild_service_role.arn,
        opts=ResourceOptions(depends_on=[vpc_environment]))

    outputs = {"pulumi_token_param_name": pulumi_token_param.name}

    self.set_outputs(outputs)
import json

import pulumi
from pulumi_aws import secretsmanager, iam

from iam.policy import get_identity_policy_document, get_assume_role_policy_document, \
    get_secret_resource_policy_document

noaccess_role = iam.Role("noaccess-role",
                         name="NoAccessRole",
                         assume_role_policy=get_assume_role_policy_document())

trusted_role = iam.Role("trusted-role",
                        name="TrustedRole",
                        assume_role_policy=get_assume_role_policy_document())

identity_policy = iam.Policy("identity-policy",
                             name="IdentityPolicy",
                             policy=get_identity_policy_document())

noaccess_role_attachment = iam.RolePolicyAttachment(
    "noaccess-role-attachment",
    role=noaccess_role,
    policy_arn=identity_policy.arn)

trusted_role_attachment = iam.RolePolicyAttachment(
    "trusted-role-attachment",
    role=trusted_role,
    policy_arn=identity_policy.arn)

secret_resource_policy_document = trusted_role.unique_id.apply(
from pulumi_infrastructure.development_environment import DevelopmentEnvironment

environment = DevelopmentEnvironment(
    "ExamplePOC",
    github_repo_name="https://github.com/cloudspeak/brew-install-efs-poc.git",
    github_version_name="codebuild")

# IAM

example_role = iam.Role("ExampleFunctionRole",
                        assume_role_policy="""{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": "sts:AssumeRole",
            "Principal": {
                "Service": "lambda.amazonaws.com"
            },
            "Effect": "Allow",
            "Sid": ""
        }
    ]
}""")

iam.RolePolicyAttachment(
    "VpcAccessPolicyAttach",
    policy_arn="arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole",
    role=example_role.name)

# Lambda
# TEMPLATE_URL = config.get('template_url') or 'https://amazon-eks.s3-us-west-2.amazonaws.com/cloudformation/2019-02-11/amazon-eks-nodegroup.yaml'

# Instance type(s) of the AMI
INSTANCE_TYPE = config.get('instance_type') or 't3.medium'

# https://learn.hashicorp.com/terraform/aws/eks-intro#worker-node-autoscaling-group
# https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/launch_configuration.html.markdown
# https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/iam_instance_profile.html.markdown

eks_role = iam.Role(
    'eks-role',
    assume_role_policy='{"Version":"2012-10-17","Statement":[{"Action":["sts:AssumeRole"],"Effect":"Allow","Principal":{"Service":["eks.amazonaws.com"]}}]}',
    description='Allows EKS to manage clusters on your behalf.',
    max_session_duration=3600,
    path='/',
    force_detach_policies=False,
)

ec2_role = iam.Role(
    'ec2-role',
    assume_role_policy='{"Version":"2012-10-17","Statement":[{"Action":["sts:AssumeRole"],"Effect":"Allow","Principal":{"Service":["ec2.amazonaws.com"]}}]}',
    max_session_duration=3600,
    path='/',
    force_detach_policies=False,
)

eks_role_policy_0 = iam.RolePolicyAttachment(
from pulumi_aws import config, iam
from pulumi import Config
import json

pulumi_config = Config()

lambda_role = iam.Role(
    "ce-lambda-role",
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Principal": {
                "Service": "lambda.amazonaws.com"
            },
            "Effect": "Allow",
            "Sid": "",
        }],
    }),
)

lambda_role_policy_exec = iam.RolePolicyAttachment(
    "ce-lambda-role-policy-exec",
    role=lambda_role.id,
    policy_arn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
)

lambda_role_policy_ec2 = iam.RolePolicyAttachment(
    "ce-lambda-role-policy-ec2",
    role=lambda_role.id,
def create_lambda_execution_roles(region, account):
    # Create lambda execution role
    lambda_assume_role_policy = iam.get_policy_document(statements=[{
        "actions": ["sts:AssumeRole"],
        "principals": [{
            "identifiers": ["lambda.amazonaws.com"],
            "type": "Service",
        }],
    }])

    lambda_execution_role = iam.Role(
        "sendMessagelambda",
        assume_role_policy=lambda_assume_role_policy.json)

    iam.RolePolicy(
        "RolePolicyAttachment",
        role=lambda_execution_role.id,
        policy=f"""{{
            "Version": "2012-10-17",
            "Statement": [
                {{
                    "Effect": "Allow",
                    "Action": [
                        "ec2:CreateNetworkInterface",
                        "logs:CreateLogStream",
                        "ec2:DescribeNetworkInterfaces",
                        "ec2:DeleteNetworkInterface",
                        "logs:CreateLogGroup",
                        "logs:PutLogEvents"
                    ],
                    "Resource": "*"
                }},
                {{
                    "Effect": "Allow",
                    "Action": [
                        "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                    "Resource": "arn:aws:logs:{region.name}:{account}:log-group:*"
                }},
                {{
                    "Effect": "Allow",
                    "Action": "logs:CreateLogGroup",
                    "Resource": "arn:aws:logs:{region.name}:{account}:*"
                }},
                {{
                    "Effect": "Allow",
                    "Action": [
                        "execute-api:ManageConnections",
                        "execute-api:Invoke"
                    ],
                    "Resource": [
                        "arn:aws:execute-api:{region.name}:{account}:*"
                    ]
                }},
                {{
                    "Action": "ec2:*",
                    "Effect": "Allow",
                    "Resource": "*"
                }}
            ]
        }}""")

    return {"role": lambda_execution_role}
region = config.region

dynamoTable = dynamodb.Table(
    'ReplicationTable',
    attributes=[{
        'name': 'Id',
        'type': 'S'
    }],
    hash_key='Id',
    billing_mode='PAY_PER_REQUEST',
    stream_enabled=True,
    stream_view_type='NEW_IMAGE')

bucket = s3.Bucket('ReplicationBucket')

firehoseRole = iam.Role(
    'ReplicationFirehoseRole',
    assume_role_policy=getFirehoseRoleTrustPolicyDocument(accountId))

deliveryStream = kinesis.FirehoseDeliveryStream(
    'ReplicationDeliveryStream',
    destination='extended_s3',
    extended_s3_configuration={
        'bucketArn': bucket.arn,
        'role_arn': firehoseRole.arn,
        'compressionFormat': 'GZIP'
    })

firehoseRolePolicy = iam.RolePolicy(
    'ReplicationDeliveryStreamPolicy',
    role=firehoseRole.name,
    policy=getFirehoseRolePolicyDocument(
source_credentials = codebuild.SourceCredential(
    resource_name="Github-Credentials",
    auth_type="PERSONAL_ACCESS_TOKEN",
    server_type="GITHUB",
    token="12343sdgqbweetb23",
    opts=pulumi.ResourceOptions(delete_before_replace=True),
)

# Create a role for cicd
cicd_role = iam.Role(
    resource_name="CICD-sample-Role",
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Service": ["codebuild.amazonaws.com"]
            },
            "Action": "sts:AssumeRole",
        }],
    }),
)

# Create a policy for the cicd role
cicd_policy = iam.RolePolicy(
    resource_name="CICD-sample-policy",
    policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
def __init__(self,
             name: str,
             stack: str,
             issue: str,
             runtime: str,
             handler: str,
             lambda_archive: pulumi.Input[pulumi.Archive],
             source_code_hash: str = None,
             memory_size_mb: int = 128,
             timeout: int = 1,
             opts: pulumi.ResourceOptions = None):
    """
    Create a Lambda for usage at CloudFront; please use a us-east-1 provider in opts.
    Creates the Role and grants permissions for edgelambda.amazonaws.com.

    :param name: Name of the component
    :param stack: Name of the stack, staging or prod for example, used for tags
    :param issue: Issue tracker id, used for tags
    :param runtime: Lambda runtime, supported runtimes: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-lambda-function-configuration
    :param handler: Lambda handler
    :param lambda_archive: Archive with Lambda code
    :param source_code_hash: base64(sha256(lambda.zip))
    :param memory_size_mb: Lambda memory size in Mb, 128 Mb max for viewer request and response events
    :param timeout: Lambda timeout, max 30 seconds for origin request and response events and max 5 seconds for viewer request and response events, see details at https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-see-limits
    :param opts: Standard Pulumi ResourceOptions
    """
    super().__init__('LambdaEdge', name, None, opts)
    self.name = name
    self.stack = stack
    self.issue = issue

    if timeout > 30:
        raise LambdaTimeoutValidation(
            'Maximum timeout for lambda@edge is 30 seconds for origin events and 5 seconds for viewer events'
        )

    self.tags = {
        'lambda-edge': f'{self.name}-{self.stack}',
        'stack': self.stack,
        'issue': self.issue,
    }

    role = iam.Role(f'{name}-lambda-role',
                    path='/service-role/',
                    assume_role_policy=json.dumps(LAMBDA_ROLE),
                    tags=self.tags,
                    opts=pulumi.ResourceOptions(parent=self))

    iam.RolePolicy(f'{name}-lambda-policy',
                   role=role.id,
                   policy=json.dumps(LAMBDA_CLOUDWATCH_POLICY),
                   opts=pulumi.ResourceOptions(parent=self))

    lambda_edge = lambda_.Function(
        f'{name}-lambda-edge',
        description=f'Handler for processing index.html for stack: {stack}, '
                    f'issue: {issue}',
        runtime=runtime,
        handler=handler,
        code=lambda_archive,
        source_code_hash=source_code_hash,
        memory_size=memory_size_mb,
        timeout=timeout,
        publish=True,
        tags=self.tags,
        role=role.arn,
        opts=pulumi.ResourceOptions(parent=self))

    lambda_.Permission(f'{name}-lambda-edge-permission',
                       action='lambda:GetFunction',
                       function=lambda_edge,
                       principal='edgelambda.amazonaws.com',
                       opts=pulumi.ResourceOptions(parent=self))

    self.timeout = lambda_edge.timeout
    self.arn = lambda_edge.arn
    self.lambda_edge = lambda_edge

    self.register_outputs({
        'timeout': self.timeout,
        'arn': self.arn,
    })
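An illustrative instantiation of this component, as a sketch: the class name `LambdaEdge` and the archive file name are assumptions here, and the us-east-1 provider is passed because Lambda@Edge functions must be created in that region.

# Hypothetical usage of the Lambda@Edge component above.
import pulumi
from pulumi_aws import Provider

us_east_1 = Provider("us-east-1", region="us-east-1")

edge = LambdaEdge(
    "index-rewrite",
    stack="staging",
    issue="JIRA-123",
    runtime="python3.7",
    handler="handler.main",
    lambda_archive=pulumi.FileArchive("edge.zip"),
    timeout=5,  # viewer events allow at most 5 seconds
    opts=pulumi.ResourceOptions(provider=us_east_1),
)

pulumi.export("edge_lambda_arn", edge.arn)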
def __init__(self,
             name,
             datalake_bucket: s3.Bucket = None,
             datalake_raw_path: str = None,
             fileproc_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             package_dir: str = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    super().__init__('hca:GlueNotificationLambda', name, None, opts)

    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})

    role = iam.Role(
        f"{name}-role",
        path="/lambda/",
        description=f"role for glue notification lambda",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }]
        }),
        force_detach_policies=True,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                role=role,
                opts=pulumi.ResourceOptions(parent=self))

    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=pulumi.Output.all(datalake_bucket.bucket,
                                 fileproc_bucket.bucket).apply(
                                     lambda b: inline_policy(b[0], b[1])),
        opts=pulumi.ResourceOptions(parent=self))

    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description='copy files from fileproc bucket to datalake raw bucket and trigger glue jobs',
        handler='glue_notification.main',
        environment={
            'variables': {
                'S3_DATALAKE_BUCKET': datalake_bucket,
                'S3_RAW_PATH': datalake_raw_path,
                'PULUMI_STACK': pulumi.get_stack(),
                'PULUMI_PROJECT': pulumi.get_project()
            }
        },
        memory_size=256,
        timeout=60,
        code=pulumi.AssetArchive({
            # use lambda-glue-notification created with build.py
            '.': pulumi.FileArchive(package_dir),
        }),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=fileproc_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
from pulumi_aws import config, iam
import json

## EKS Cluster Role
eks_role = iam.Role(
    'eks-iam-role',
    assume_role_policy=json.dumps({
        'Version': '2012-10-17',
        'Statement': [
            {
                'Action': 'sts:AssumeRole',
                'Principal': {
                    'Service': 'eks.amazonaws.com'
                },
                'Effect': 'Allow',
                'Sid': ''
            }
        ],
    }),
)

iam.RolePolicyAttachment(
    'eks-service-policy-attachment',
    role=eks_role.id,
    policy_arn='arn:aws:iam::aws:policy/AmazonEKSServicePolicy',
)

iam.RolePolicyAttachment(
    'eks-cluster-policy-attachment',
def __init__(self, name, opts=None):
    super().__init__("nuage:aws:Analytics", name, None, opts)

    account_id = get_caller_identity().account_id
    region = config.region

    bucket = s3.Bucket(f"{name}Bucket")

    firehose_role = iam.Role(
        f"{name}FirehoseRole",
        assume_role_policy=get_firehose_role_trust_policy_document(account_id),
    )

    delivery_stream = kinesis.FirehoseDeliveryStream(
        f"{name}DeliveryStream",
        destination="extended_s3",
        extended_s3_configuration={
            "bucketArn": bucket.arn,
            "role_arn": firehose_role.arn,
            "compressionFormat": "GZIP",
        },
        opts=ResourceOptions(depends_on=[bucket, firehose_role]),
    )

    firehose_role_policy = iam.RolePolicy(
        f"{name}DeliveryStreamPolicy",
        role=firehose_role.name,
        policy=get_firehose_role_policy_document(
            region, account_id, bucket.arn, delivery_stream.name
        ).apply(json.dumps),
    )

    pinpoint_app = pinpoint.App(f"{name}PinpointApp")

    pinpoint_stream_role = iam.Role(
        f"{name}PinpointStreamRole",
        assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
    )

    pinpoint_stream_role_policy = iam.RolePolicy(
        f"{name}PinpointStreamPolicy",
        role=pinpoint_stream_role.name,
        policy=get_pinpoint_stream_role_policy_document(
            region, account_id, delivery_stream.name, pinpoint_app.application_id
        ).apply(json.dumps),
        opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
    )

    # IAM roles can take time to propagate so we have to add an artificial delay
    pinpoint_stream_role_delay = Delay(
        "EventStreamRoleDelay",
        10,
        opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
    )

    pinpoint_stream = pinpoint.EventStream(
        f"{name}PinpointEventStream",
        application_id=pinpoint_app.application_id,
        destination_stream_arn=delivery_stream.arn,
        role_arn=pinpoint_stream_role.arn,
        opts=ResourceOptions(
            depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay]
        ),
    )

    self.set_outputs(
        {
            "bucket_name": bucket.id,
            "delivery_stream_name": delivery_stream.name,
            "destination_stream_arn": delivery_stream.arn,
            "pinpoint_application_name": pinpoint_app.name,
            "pinpoint_application_id": pinpoint_app.application_id,
        }
    )
pattern="{( \ $.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) \ || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) \ || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) \ || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) \ || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }" ) # Create role for cloudtrail service role = iam.Role("cloudwatch_log_stream_role", assume_role_policy=json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": "sts:AssumeRole", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Effect": "Allow", "Sid": "" }] })) # function to create role policy JSON code def iam_role_generate(resource): return json.dumps({ "Version": "2012-10-17", "Statement": [{ "Sid": "AWSCloudTrailCreateLogStream",
def __init__(
    self,
    name,
    should_create_gtm_tag=True,
    site_name: Input[str] = None,
    site_url: Input[str] = None,
    opts=None,
):
    """
    :param should_create_gtm_tag: Whether or not a GTM environment should be created
           with a tag for calling Amplify and Google Analytics.
    :param site_name: The website name used for the Google Analytics property. If
           `should_create_gtm_tag` is `True`, this is required.
    :param site_url: The website URL used for the Google Analytics property. If
           `should_create_gtm_tag` is `True`, this is required.
    """
    super().__init__("nuage:aws:Analytics", name, None, opts)

    account_id = get_caller_identity().account_id
    region = config.region

    bucket = s3.Bucket(f"{name}Bucket")

    firehose_role = iam.Role(
        f"{name}FirehoseRole",
        assume_role_policy=get_firehose_role_trust_policy_document(account_id),
    )

    delivery_stream = kinesis.FirehoseDeliveryStream(
        f"{name}DeliveryStream",
        destination="extended_s3",
        extended_s3_configuration={
            "bucketArn": bucket.arn,
            "role_arn": firehose_role.arn,
            "compressionFormat": "GZIP",
        },
        opts=ResourceOptions(depends_on=[bucket, firehose_role]),
    )

    firehose_role_policy = iam.RolePolicy(
        f"{name}DeliveryStreamPolicy",
        role=firehose_role.name,
        policy=get_firehose_role_policy_document(
            region, account_id, bucket.arn, delivery_stream.name
        ).apply(json.dumps),
    )

    pinpoint_app = pinpoint.App(f"{name}PinpointApp")

    pinpoint_stream_role = iam.Role(
        f"{name}PinpointStreamRole",
        assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
    )

    pinpoint_stream_role_policy = iam.RolePolicy(
        f"{name}PinpointStreamPolicy",
        role=pinpoint_stream_role.name,
        policy=get_pinpoint_stream_role_policy_document(
            region, account_id, delivery_stream.name, pinpoint_app.application_id
        ).apply(json.dumps),
        opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
    )

    # IAM roles can take time to propagate so we have to add an artificial delay
    pinpoint_stream_role_delay = Delay(
        "EventStreamRoleDelay",
        10,
        opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
    )

    pinpoint_stream = pinpoint.EventStream(
        f"{name}PinpointEventStream",
        application_id=pinpoint_app.application_id,
        destination_stream_arn=delivery_stream.arn,
        role_arn=pinpoint_stream_role.arn,
        opts=ResourceOptions(
            depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay]
        ),
    )

    outputs = {
        "bucket_name": bucket.id,
        "delivery_stream_name": delivery_stream.name,
        "destination_stream_arn": delivery_stream.arn,
        "pinpoint_application_name": pinpoint_app.name,
        "pinpoint_application_id": pinpoint_app.application_id,
        "gtm_container_id": None,
        "gtm_tag": None,
        "gtm_tag_no_script": None,
        "amplify_tag_id": None,
        "event_name": None,
    }

    if should_create_gtm_tag:
        if site_name is None:
            raise Exception("The site_name parameter is required for the GTM tag")
        if site_url is None:
            raise Exception("The site_url parameter is required for the GTM tag")

        gtm = GtmAnalytics(name, site_name, site_url)

        outputs = {
            **outputs,
            "gtm_container_id": gtm.container_id,
            "gtm_tag": gtm.tag,
            "gtm_tag_no_script": gtm.tag_no_script,
            "amplify_tag_id": gtm.amplify_tag_id,
            "event_name": gtm.event_name,
        }

    self.set_outputs(outputs)
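With the GTM parameters, the Analytics component above can be instantiated much like the earlier example stack; the site name and URL below are placeholder values for illustration.

# Illustrative instantiation of the Analytics component with a GTM tag.
analytics = Analytics(
    "MyAnalytics",
    should_create_gtm_tag=True,
    site_name="Example Site",
    site_url="https://www.example.com",
)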