def attach_iam_policies_to_role(cls, role: Role):
    """
    Attach the policies required to list Route53 hosted zones and to
    change/list record sets on them.

    (Docstring fixed: it previously described the SSM/SecretsManager variant
    and documented a ``zone_id`` parameter that does not exist.)

    :param role: The IAM role the Route53 policies are attached to.
    :return: None
    """
    # TODO: Extract this in a managed policy
    # Listing hosted zones cannot be restricted to a single zone, hence "*".
    route53_policy = PolicyStatement(
        resources=["*"],
        effect=Effect.ALLOW,
        actions=[
            "route53:ListHostedZones",
            "route53:ListResourceRecordSets",
        ],
    )
    route53_recordset_policy = PolicyStatement(
        resources=["arn:aws:route53:::hostedzone/*"],  # To be restricted to interested zone
        effect=Effect.ALLOW,
        actions=[
            "route53:ChangeResourceRecordSets",
            "route53:ListTagsForResource",
        ],
    )
    role.add_to_policy(route53_policy)
    role.add_to_policy(route53_recordset_policy)
def generate_codebuild_policy(scope, db_secret_arn):
    """Create the inline IAM policy for the JVSANTOS tier-1 CodeBuild project.

    :param scope: Construct scope the policy is created in.
    :param db_secret_arn: ARN of the database secret CodeBuild may read.
    :return: An ``iam.Policy`` granting secret read, logging, report,
        S3 artifact and CodeStar connection permissions.
    """
    def allow(actions, resources):
        # Every statement in this policy is an ALLOW statement.
        return PolicyStatement(actions=actions,
                               resources=resources,
                               effect=Effect.ALLOW)

    statements = [
        allow(["secretsmanager:GetSecretValue"], [db_secret_arn]),
        allow([
            "logs:CreateLogGroup",
            "logs:CreateLogStream",
            "logs:PutLogEvents",
        ], ["*"]),
        allow([
            "codebuild:CreateReportGroup",
            "codebuild:CreateReport",
            "codebuild:UpdateReport",
            "codebuild:BatchPutTestCases",
            "codebuild:BatchPutCodeCoverages",
        ], ["*"]),
        allow([
            "s3:GetObject*",
            "s3:GetBucket*",
            "s3:List*",
            "s3:DeleteObject*",
            "s3:PutObject",
            "s3:Abort*",
        ], ["*"]),
        allow(["codestar-connections:UseConnection"], ["*"]),
    ]
    return iam.Policy(scope=scope,
                      id="JVSANTOSTier1CodebuildPolicy",
                      policy_name="JVSANTOS-codebuild-policy",
                      statements=statements)
def __role(self) -> Role:
    """
    A role for custom resource which manages git commits to codecommit.

    :return: Custom resource's role.
    """
    # Commits may only be created on the project's own repository.
    commit_statement = PolicyStatement(
        actions=[
            "codecommit:CreateCommit",
        ],
        resources=[self.__code_repository.repository_arn],
        effect=Effect.ALLOW)
    logging_statement = PolicyStatement(
        actions=[
            "logs:CreateLogGroup",
            "logs:CreateLogStream",
            "logs:PutLogEvents",
        ],
        resources=['*'],
        effect=Effect.ALLOW)

    policy_name = self.__prefix + 'CiCdLambdaCustomCommitPolicy'
    return Role(
        self.__stack,
        self.__prefix + 'CiCdLambdaCustomCommitRole',
        inline_policies={
            policy_name:
            PolicyDocument(statements=[commit_statement, logging_statement])
        },
        assumed_by=ServicePrincipal('lambda.amazonaws.com'))
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """Provision the posts table, the mailer lambda and its daily schedule."""
    super().__init__(scope, construct_id, **kwargs)

    table_name = "posts2"
    function_name = "cl2"
    email = "*****@*****.**"

    # DynamoDB table keyed by post URL; entries expire via the "ttl" attribute.
    table = Table(
        self,
        "cl_posts",
        table_name=table_name,
        partition_key=Attribute(name="url", type=AttributeType.STRING),
        time_to_live_attribute="ttl",
    )

    # The function may send mail through SES (own identity only) and batch
    # read/write the posts table.
    ses_statement = PolicyStatement(
        actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
        resources=[
            f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
        ],
    )
    table_statement = PolicyStatement(
        actions=["dynamodb:BatchGetItem", "dynamodb:BatchWriteItem"],
        resources=[table.table_arn],
    )
    function = PythonFunction(
        self,
        "cl_function",
        function_name=function_name,
        entry="src",
        index="app.py",
        runtime=Runtime.PYTHON_3_8,
        environment={
            "cl_email": email,
            "cl_table_name": table_name,
        },
        timeout=Duration.seconds(300),
        initial_policy=[ses_statement, table_statement],
    )

    # Invoke the function every day at 19:00 UTC with a canned event payload.
    with open("events/event.json") as f:
        event = json.load(f)
    Rule(
        self,
        "cl_schedule",
        schedule=Schedule.expression("cron(0 19 * * ? *)"),
        targets=[
            LambdaFunction(function, event=RuleTargetInput.from_object(event))
        ],
    )
class Policy(Construct):
    """Managed policy plus the mutable statements used for service deployment.

    The ``add_*`` methods widen the resource lists of the shared statements
    after construction.
    """

    def __init__(self, scope: Construct, id: str) -> None:
        super().__init__(scope, id)

        policy = ManagedPolicy(self, "Policy")

        # (de)registerTaskDefinitions doesn't support specific resources
        ecs_task = PolicyStatement(
            actions=[
                "ecs:DeregisterTaskDefinition",
                "ecs:RegisterTaskDefinition",
            ],
            resources=["*"],
        )
        # ListTagsForResource cannot be set for a service only, but has to be
        # on the cluster (despite it only looking at the tags for the
        # service). We keep a separate statement for this, to avoid giving
        # other policies more rights than required, as they can be per
        # service.
        self._cluster_statement = PolicyStatement(
            actions=[
                "ecs:ListTagsForResource",
            ],
        )
        # All other actions can be combined, as they don't collide. As
        # policies have a maximum amount of bytes they can consume, this
        # spares a few of them.
        self._statement = PolicyStatement(
            actions=[
                "iam:PassRole",
                "ssm:GetParameter",
                "ssm:GetParameters",
                "ssm:PutParameter",
                "ecs:UpdateService",
                "ecs:DescribeServices",
                "cloudformation:UpdateStack",
                "cloudformation:DescribeStacks",
            ],
        )
        for statement in (ecs_task, self._cluster_statement, self._statement):
            policy.add_statements(statement)

    def add_role(self, role: IRole) -> None:
        """Allow the combined statement to act on *role*."""
        self._statement.add_resources(role.role_arn)

    def add_parameter(self, parameter: IParameter) -> None:
        """Allow the combined statement to act on *parameter*."""
        self._statement.add_resources(parameter.parameter_arn)

    def add_service(self, service: IService) -> None:
        """Allow the combined statement to act on *service*."""
        self._statement.add_resources(service.service_arn)

    def add_cluster(self, cluster: ICluster) -> None:
        """Allow the cluster-scoped statement to act on *cluster*."""
        self._cluster_statement.add_resources(cluster.cluster_arn)

    def add_stack(self, stack: Stack) -> None:
        """Allow the combined statement to act on *stack*."""
        self._statement.add_resources(stack.stack_id)
def _create_s3_access_role(self, identity_pool: CfnIdentityPool,
                           s3_bucket: Bucket) -> Role:
    """Create the role Cognito identities assume to list the demo bucket.

    :param identity_pool: Identity pool whose identities may assume the role.
    :param s3_bucket: Bucket the role is allowed to list.
    :return: The created role; its ARN is also exported as ``ROLE_ARN``.
    """
    # Only web identities issued by this specific identity pool qualify.
    principal = WebIdentityPrincipal(
        'cognito-identity.amazonaws.com',
        conditions={
            'StringEquals': {
                'cognito-identity.amazonaws.com:aud': identity_pool.ref
            }
        })
    list_bucket_document = PolicyDocument(statements=[
        PolicyStatement(effect=Effect.ALLOW,
                        actions=['s3:ListBucket'],
                        resources=[s3_bucket.bucket_arn])
    ])
    role = Role(self,
                'DemoRole',
                role_name='CognitoDemoBucketAccess',
                assumed_by=principal,
                inline_policies={'ListBucket': list_bucket_document})
    CfnOutput(self, 'ROLE_ARN', value=role.role_arn)
    return role
def attach_iam_policies_to_role(cls, role: Role):
    """
    Attach the inline policies necessary to manage autoscaling using the
    kubernetes cluster autoscaler

    :param role: Node role the autoscaler statements are attached to.
    :return: None
    """
    # TODO: Extract this in a managed policy
    # Actions the cluster autoscaler needs to inspect and resize ASGs.
    autoscaler_actions = [
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeAutoScalingInstances",
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:DescribeTags",
        "autoscaling:SetDesiredCapacity",
        "autoscaling:TerminateInstanceInAutoScalingGroup",
        "ec2:DescribeLaunchTemplateVersions",
    ]
    policies: Dict[str, PolicyStatement] = {
        'cluster_autoscaler':
        PolicyStatement(resources=["*"],
                        effect=Effect.ALLOW,
                        actions=autoscaler_actions),
    }
    for statement in policies.values():
        role.add_to_policy(statement)
def __init__(self, scope: Stack, name: str) -> None:
    """
    Constructor.

    :param scope: CloudFormation stack in which this function will be deployed.
    :param name: The name of the function.
    """
    self.__name = name

    super().__init__(
        scope=scope,
        id=name,
        uuid=f'{name}-uuid',
        function_name=name,
        code=self.__code(),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.minutes(1),
    )

    # Add permission to create deployments. Since this is a singleton lambda
    # function, we can not specify a specific api gateway resource.
    deployment_statement = PolicyStatement(
        actions=['apigateway:POST', 'apigateway:PATCH'], resources=['*'])
    self.add_to_role_policy(deployment_statement)
def __create_s3_trigger_lambda_execution_role(
        self, bucket_name: str,
        cloudfront_distribution: aws_cloudfront.Distribution
) -> aws_iam.Role:
    """Create the execution role for the S3-triggered deploy lambda.

    :param bucket_name: Bucket holding the uploaded/unpacked site assets.
    :param cloudfront_distribution: Distribution whose cache the lambda invalidates.
    :return: The lambda execution role.
    """
    return aws_iam.Role(
        self,
        "S3TriggerLambdaExecutionRole",
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        description=
        'Execution role that allows the lambda function to get the uploaded zip from S3, upload the '
        'unpacked one and invalidate the CDN',
        path='/',
        managed_policies=[
            ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ],
        inline_policies={
            's3_trigger_artifacts-upload-role':
            PolicyDocument(statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        # BUG FIX: the original listed 's3:ListObject',
                        # which is not a real S3 action and never matched;
                        # bucket listing is granted by 's3:ListBucket'.
                        # NOTE(review): 's3:HeadBucket' is likewise not an
                        # official action (HeadBucket calls are authorized
                        # via s3:ListBucket) — kept pending confirmation.
                        's3:PutObject', 's3:GetObject', 's3:ListBucket',
                        's3:DeleteObject', 's3:HeadBucket',
                        'cloudfront:CreateInvalidation'
                    ],
                    resources=[
                        f'arn:aws:cloudfront::{Fn.ref("AWS::AccountId")}:distribution/'
                        f'{cloudfront_distribution.distribution_id}',
                        f'arn:aws:s3:::{bucket_name}',
                        f'arn:aws:s3:::{bucket_name}/*'
                    ])
            ])
        })
def init_lambda(self):
    """Build the Lambda@Edge function from vendored requirements and return its version."""
    tmp_dir = install_lambda_code_requirements()

    # Keep packaging metadata and local-only files out of the asset bundle.
    excluded = [
        ".env",
        "__main*",
        "*.dist-info",
        "bin",
        "requirements.txt",
    ]
    lambda_code = Code.from_asset(str(tmp_dir), exclude=excluded)
    lambda_function = Function(self,
                               "lambda",
                               code=lambda_code,
                               handler="main.handler",
                               runtime=Runtime.PYTHON_3_8)

    # Lambda@Edge requires the role to also be assumable by edgelambda.
    edge_trust = PolicyStatement(
        actions=["sts:AssumeRole"],
        principals=[ServicePrincipal("edgelambda.amazonaws.com")])
    lambda_function.role.assume_role_policy.add_statements(edge_trust)

    version = Version(self, "version", lambda_=lambda_function)
    apply_removal_policy(lambda_function, version, lambda_function.role)
    return version
def createPolicy(this, testLambda: _lambda.Function) -> PolicyStatement:
    """Build the project policy statement scoped to *testLambda*.

    (Return annotation fixed: it was ``-> None`` although the function
    returns the statement.)

    :param testLambda: Lambda function whose ARN the statement is limited to.
    :return: An ALLOW statement for S3 and CloudWatch Logs actions.
    """
    # NOTE(review): the S3/logs actions are resource-scoped to the lambda
    # ARN, where they have no effect — confirm the intended resources.
    projectPolicy: PolicyStatement = PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        resources=[testLambda.function_arn],
        actions=[
            "s3:*", "logs:CreateLogGroup", "logs:CreateLogStream",
            "logs:PutLogEvents"
        ])
    return projectPolicy
def add_contact_api(stack: CDKMasterStack, project_name: str, domain: str,
                    forwarding_email: str):
    """Add a POST /contact endpoint backed by an SES-sending lambda.

    Also verifies the SES domain identity and publishes the verification
    token as a TXT record in the stack's hosted zone.

    :param stack: Master stack exposing ``zone`` and ``add_api_method``.
    :param project_name: Used to build the sender display name.
    :param domain: Domain mail is sent from and verified with SES.
    :param forwarding_email: Address contact-form submissions are forwarded to.
    """
    module_path = os.path.dirname(__file__)
    lambda_path = os.path.join(module_path, "lambda")
    api_path = "contact"

    # Lambda that receives the form payload and forwards it via SES.
    base_lambda = aws_lambda.Function(
        stack,
        'ContactFormLambda',
        handler='lambda_handler.handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        environment={
            "TARGET_EMAIL": forwarding_email,
            "SENDER_EMAIL": f"contact@{domain}",
            "SENDER_NAME": f"{project_name.capitalize()}",
            "SENDER":
            f"{project_name.capitalize()} Contact Form <contact@{domain}>"
        },
        code=aws_lambda.Code.asset(lambda_path),
    )
    # NOTE(review): SES send permissions are granted on "*" — could be
    # restricted to the domain identity.
    base_lambda.add_to_role_policy(
        aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["ses:SendEmail", "ses:SendRawEmail"]))

    # Custom resource calling SES VerifyDomainIdentity; the returned token
    # becomes the physical resource id.
    verify_domain_create_call = AwsSdkCall(
        service="SES",
        action="verifyDomainIdentity",
        parameters={"Domain": domain},
        physical_resource_id=PhysicalResourceId.from_response(
            "VerificationToken"))
    policy_statement = PolicyStatement(actions=["ses:VerifyDomainIdentity"],
                                       resources=["*"])
    verify_domain_identity = AwsCustomResource(
        stack,
        "VerifyDomainIdentity",
        on_create=verify_domain_create_call,
        policy=AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement]))

    # Publish the token so SES can confirm domain ownership.
    aws_route53.TxtRecord(
        stack,
        "SESVerificationRecord",
        zone=stack.zone,
        record_name=f"_amazonses.{domain}",
        values=[
            verify_domain_identity.get_response_field("VerificationToken")
        ])

    stack.add_api_method(api_path, "POST", base_lambda)
def __init__(self, scope: Construct, id: str, name: str) -> None:
    """Create a KMS key aliased ``alias/kms-<name>`` with an open key policy."""
    super().__init__(scope, id)

    self._key = Key(self, f"kms_key_{name}")
    self._key.add_alias(f"alias/kms-{name}")

    # NOTE(review): kms:* for AnyPrincipal is very permissive — confirm this
    # is the intended key policy.
    open_statement = PolicyStatement(effect=Effect.ALLOW,
                                     actions=["kms:*"],
                                     principals=[AnyPrincipal()],
                                     resources=["*"])
    self._key.add_to_resource_policy(open_statement)
def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
    """Provision the Kinesis replicator: stream, consumer lambda, DLQ and stats table.

    :param kwargs: Must contain ``env`` (forwarded to the base Stack) and
        ``target_table_name`` (existing DDB table replicated into).
    """
    # Only forward the env to the base Stack; other kwargs are consumed here.
    new_kwargs = {'env': kwargs['env']}
    super().__init__(scope, _id, **new_kwargs)

    # Create staging Kinesis Data Stream, set to 1 shard in this sample code
    kinesis_stream = kinesis.Stream(self,
                                    STREAM_NAME,
                                    stream_name=STREAM_NAME,
                                    shard_count=1)

    # Create replicator lambda function that consumes the Kinesis stream and
    # writes to target DDB table
    target_table_name = kwargs['target_table_name']
    dlq_sqs = sqs.Queue(self, 'replicator_failure_Q')
    replicator_lambda = lambda_.Function(
        self,
        'replicator_kinesis',
        code=lambda_.Code.asset("../lambda_replicator"),
        runtime=lambda_.Runtime.PYTHON_3_7,
        handler='replicator_kinesis.lambda_handler',
        timeout=core.Duration.seconds(60),
        environment={'TARGET_TABLE': target_table_name})
    kinesis_stream.grant_read(replicator_lambda)
    # Failed batches are retried up to 100 times, then routed to the SQS DLQ.
    replicator_lambda.add_event_source(
        KinesisEventSource(stream=kinesis_stream,
                           starting_position=lambda_.StartingPosition.LATEST,
                           batch_size=500,
                           retry_attempts=100,
                           parallelization_factor=10,
                           on_failure=SqsDlq(dlq_sqs)))
    target_table = ddb.Table.from_table_name(self, target_table_name,
                                             target_table_name)
    target_table.grant_read_write_data(replicator_lambda)

    # The replicator lambda will put metrics to Cloudwatch
    put_metrics_policy = PolicyStatement(
        actions=['cloudwatch:PutMetricData'],
        effect=Effect.ALLOW,
        resources=['*'])
    replicator_lambda.add_to_role_policy(put_metrics_policy)

    # Create replicator-stats table for statistics of replicator
    replicator_stats_table = ddb.Table(
        self,
        REPLICATOR_STATS_TABLE_NAME,
        table_name=REPLICATOR_STATS_TABLE_NAME,
        partition_key=ddb.Attribute(name="PK",
                                    type=ddb.AttributeType.STRING),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
        removal_policy=RemovalPolicy.DESTROY)
    replicator_stats_table.grant_read_write_data(replicator_lambda)
    core.CfnOutput(self,
                   "replicator_stats_table",
                   value=replicator_stats_table.table_name)
def get_public_dns_policy_statement():
    """Return the ALLOW statement for keeping public DNS in sync with ECS tasks."""
    return PolicyStatement(
        effect=Effect.ALLOW,
        actions=[
            "ec2:DescribeNetworkInterfaces",
            "ecs:DescribeClusters",
            "ecs:ListTagsForResource",
            "route53:ChangeResourceRecordSets",
        ],
        resources=['*'],
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Wire a source lambda to a target lambda through a custom EventBridge bus.

    The source function may put events on the bus; a rule matching
    ``detail.Domain == "MedInfo"`` and ``detail.Reason == "InvokeTarget"``
    forwards those events to the target function.
    """
    super().__init__(scope, construct_id, **kwargs)

    self.event_bus = EventBus(scope=self,
                              id='CustomEventBus',
                              event_bus_name='CustomEventBus')

    # Placeholder-free f-strings removed from the construct ids/names.
    self.source = Function(
        scope=self,
        id='SourceFunction',
        function_name='SourceFunction',
        code=Code.from_asset(path='./code_source/'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
    )
    # The source may publish events only to the custom bus.
    self.source.add_to_role_policy(statement=PolicyStatement(
        actions=['events:PutEvents'],
        resources=[self.event_bus.event_bus_arn]))

    """
    Define rule.
    """
    self.rule = Rule(
        scope=self,
        id='EventBusRule',
        description='Sample description.',
        enabled=True,
        event_bus=self.event_bus,
        event_pattern=EventPattern(detail={
            'Domain': ["MedInfo"],
            'Reason': ["InvokeTarget"]
        }),
        rule_name='EventBusRule',
    )

    """
    Add target.
    """
    # Keep the Function in a local so self.target is assigned exactly once
    # (the original briefly stored the Function and then clobbered it).
    target_function = Function(
        scope=self,
        id='TargetFunction',
        function_name='TargetFunction',
        code=Code.from_asset(path='./code_target/'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
    )
    self.target: Union[IRuleTarget, LambdaFunction] = LambdaFunction(
        handler=target_function)
    self.rule.add_target(target=self.target)
def grant_lambda_access_to_secrets(self, rotator_lambda, secret_configs):
    """
    Adds a custom policy to the lambda role which gives it access to the
    static secrets used for authentication.

    Args:
        rotator_lambda (Function): The lambda function used for rotating a secret
        secret_configs (Dictionary): list of configurations for static secrets
            used for authentication by rotator_lambda
    """
    # One read-only statement per static secret.
    for config in secret_configs:
        statement = PolicyStatement(
            effect=Effect.ALLOW,
            resources=[get_secret_arn(config)],
            actions=["secretsmanager:GetSecretValue"])
        rotator_lambda.add_to_role_policy(statement)
def grant_lambda_access_to_rotate_secret(self, rotator_lambda, secret_config):
    """
    Adds a custom policy to the lambda role which gives it access to the
    secret being rotated.

    Documentation can be found here:
    https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot_rotation.html#tshoot-lambda-accessdeniedduringrotation

    Args:
        rotator_lambda (Function): The lambda function used for rotating a secret
        secret_config (Dictionary): The configuration for the secret specified
            in secrets_config.json
    """
    secret_arn = get_secret_arn(secret_config)

    # Full rotation lifecycle on the one secret being rotated.
    rotation_statement = PolicyStatement(
        effect=Effect.ALLOW,
        resources=[secret_arn],
        actions=[
            "secretsmanager:DescribeSecret",
            "secretsmanager:GetSecretValue",
            "secretsmanager:PutSecretValue",
            "secretsmanager:UpdateSecretVersionStage",
        ])
    # GetRandomPassword is not resource-scoped, hence '*'.
    password_statement = PolicyStatement(
        effect=Effect.ALLOW,
        resources=['*'],
        actions=["secretsmanager:GetRandomPassword"])

    rotator_lambda.add_to_role_policy(rotation_statement)
    rotator_lambda.add_to_role_policy(password_statement)
def _create_lambdas_config(self):
    """Populate ``self.lambdas_config`` with layers, env, policies and sizing per API lambda."""
    movies_arn = self.movies_table.table_arn
    movies_name = self.movies_table.table_name

    self.lambdas_config = {
        "api-movies_by_id": {
            "layers": ["utils", "databases"],
            "variables": {
                "MOVIES_DATABASE_NAME": movies_name,
                "LOG_LEVEL": "INFO",
            },
            # Single-item reads only.
            "policies": [
                PolicyStatement(actions=["dynamodb:GetItem"],
                                resources=[movies_arn])
            ],
            "timeout": 3,
            "memory": 128
        },
        "api-movies": {
            "layers": ["utils", "databases"],
            "variables": {
                "MOVIES_DATABASE_NAME": movies_name,
                "LOG_LEVEL": "INFO",
            },
            # Queries go through the tmdb_id index; updates hit the table.
            "policies": [
                PolicyStatement(actions=["dynamodb:Query"],
                                resources=[f"{movies_arn}/index/tmdb_id"]),
                PolicyStatement(actions=["dynamodb:UpdateItem"],
                                resources=[movies_arn]),
            ],
            "timeout": 10,
            "memory": 128
        },
    }
def __create_cloud_front_lambda_execution_role(self) -> aws_iam.Role:
    """Create the Lambda@Edge execution role, trusted by lambda and edgelambda."""
    role = aws_iam.Role(
        self,
        'CloudFrontLambdaExecutionRole',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        path='/',
        managed_policies=[
            ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ])
    # Replicated Lambda@Edge functions are also invoked via edgelambda.
    edge_trust = PolicyStatement(
        principals=[ServicePrincipal('edgelambda.amazonaws.com')],
        actions=['sts:AssumeRole'])
    role.assume_role_policy.add_statements(edge_trust)
    return role
def create_managed_policy(self):
    """Return a managed policy letting the step-function lambda put objects into the bucket."""
    put_object = PolicyStatement(
        effect=Effect.ALLOW,
        actions=["s3:PutObject"],
        resources=[f'{self.bucket.bucket_arn}/*'],
    )
    return ManagedPolicy(
        self,
        'Managed Policy',
        managed_policy_name='sfn_lambda_policy',
        statements=[put_object],
    )
def __init__(self, scope: Construct, id: str, envs: EnvSettings):
    """Create the project KMS key, allow the account root to use it, export its ARN."""
    super().__init__(scope, id)

    self.key = Key(self, id="Key", alias=f"alias/{envs.project_name}")

    # The account root principal may encrypt/decrypt with this key.
    root_usage = PolicyStatement(actions=["kms:Encrypt", "kms:Decrypt"],
                                 principals=[AccountRootPrincipal()],
                                 resources=["*"])
    self.key.add_to_resource_policy(root_usage)

    CfnOutput(
        self,
        "KmsKeyArnOutput",
        export_name=self.get_kms_arn_output_export_name(envs),
        value=self.key.key_arn,
    )
def get_provisioning_lambda_role(self, role_arn: str):
    """Create the provisioning lambda's execution role.

    The role has basic execution permissions plus ``iam:PassRole`` scoped to
    the given role ARN.

    :param role_arn: ARN of the role the lambda is allowed to pass on.
    :return: The newly created IAM role.
    """
    role = iam.Role(
        scope=self,
        id='LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole")
        ],
    )
    # BUG FIX: the message previously printed the literal text "role_arn"
    # because the f-string never interpolated the variable.
    print(f'adding iam:PassRole to "{role_arn}"')
    role.add_to_policy(
        PolicyStatement(actions=["iam:PassRole"], resources=[role_arn]))
    return role
def create_default_infrastructure_config(
        self, construct_id: str) -> CfnInfrastructureConfiguration:
    """
    Create the default infrastructure config, which defines the
    permissions needed by Image Builder during image creation.
    """
    role_name = f"DeadlineMachineImageBuilderRole{construct_id}"

    # Role assumed by the Image Builder EC2 instances.
    builder_role = Role(self,
                        role_name,
                        assumed_by=ServicePrincipal("ec2.amazonaws.com"),
                        role_name=role_name)
    for managed_policy_name in ('EC2InstanceProfileForImageBuilder',
                                'AmazonSSMManagedInstanceCore'):
        builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(managed_policy_name))
    # Read access to the Thinkbox installers bucket.
    builder_role.add_to_policy(
        PolicyStatement(actions=[
            's3:Get*',
            's3:List*',
        ],
                        resources=['arn:aws:s3:::thinkbox-installers/*']))

    profile_name = f"DeadlineMachineImageBuilderPolicy{construct_id}"
    builder_profile = CfnInstanceProfile(self,
                                         profile_name,
                                         instance_profile_name=profile_name,
                                         roles=[role_name])
    builder_profile.add_depends_on(builder_role.node.default_child)

    infrastructure_configuration = CfnInfrastructureConfiguration(
        self,
        f"InfrastructureConfig{construct_id}",
        name=f"DeadlineInfrastructureConfig{construct_id}",
        instance_profile_name=profile_name)
    infrastructure_configuration.add_depends_on(builder_profile)
    return infrastructure_configuration
def role(self) -> Role:
    """Create the execution role for the ElasticsearchIndexResourceProvider lambda.

    :return: A lambda-assumable role with (currently unrestricted) ES access.
    """
    inline_policies = {
        # NOTE(review): the "Privider" typo is kept deliberately — renaming
        # the inline policy would modify the deployed role.
        "ElasticsearchIndexPrividerFunctionElasticsearchAccessPolicy":
        PolicyDocument(statements=[
            PolicyStatement(
                # TODO restrict this to appropriate API calls.
                actions=["es:*"],
                resources=["*"],
            )
        ]),
    }
    return Role(
        scope=self.__scope,
        id=f"{self.__name}ElasticsearchIndexResourceProviderRole",
        assumed_by=ServicePrincipal("lambda.amazonaws.com"),
        # Plain string: the original used an f-string with no placeholders.
        description="A role for ElasticsearchIndexResourceProvider lambda function.",
        inline_policies=inline_policies,
    )
def attach_iam_policies_to_role(cls, role: Role):
    """
    Attach the necessary policies to read secrets from SSM and SecretsManager

    :param role: Role that gains read access to SecretsManager and SSM.
    :return: None
    """
    # TODO: Extract this in a managed policy
    read_secret_actions = [
        "secretsmanager:GetResourcePolicy",
        "secretsmanager:GetSecretValue",
        "secretsmanager:DescribeSecret",
        "secretsmanager:ListSecretVersionIds",
    ]
    role.add_to_policy(
        PolicyStatement(resources=["*"],
                        effect=Effect.ALLOW,
                        actions=read_secret_actions))
    # SSM read access comes from the AWS-managed read-only policy.
    role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AmazonSSMReadOnlyAccess'))
def create_ecs_lambda(self, cluster: ICluster,
                      auto_scaling_group: AutoScalingGroup):
    """Create the lambda that reacts to ECS task state changes on *cluster*."""
    lambda_func = Function(
        self,
        "LambdaECS",
        code=Code.from_asset("./lambdas/nlb-ecs"),
        handler="index.lambda_handler",
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.seconds(30),
        environment={
            "AUTO_SCALING_GROUP_NAME":
            auto_scaling_group.auto_scaling_group_name,
        },
    )
    # Needs to describe the ASG and run SSM commands on its instances.
    lambda_func.add_to_role_policy(
        PolicyStatement(
            actions=[
                "autoscaling:DescribeAutoScalingGroups",
                "ssm:SendCommand",
                "ssm:GetCommandInvocation",
            ],
            resources=["*"],
        ))
    # Fire on every ECS task state change within the given cluster.
    Rule(
        self,
        "ECS",
        event_pattern=EventPattern(
            detail_type=["ECS Task State Change"],
            detail={"clusterArn": [cluster.cluster_arn]},
            source=["aws.ecs"],
        ),
        targets=[LambdaFunction(lambda_func)],
    )
def __create_s3_source_bucket_policy(
        self, s3_source_bucket: aws_s3.Bucket,
        cloud_front_origin_access_identity: aws_cloudfront.OriginAccessIdentity
):
    """Allow the CloudFront origin access identity to read objects from the source bucket."""
    oai_principal = aws_iam.ArnPrincipal(
        f'arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity '
        f'{cloud_front_origin_access_identity.origin_access_identity_name}')
    read_statement = PolicyStatement(
        effect=Effect.ALLOW,
        actions=['s3:GetObject'],
        sid='1',
        resources=[f'arn:aws:s3:::{s3_source_bucket.bucket_name}/*'],
        principals=[oai_principal])
    return aws_s3.CfnBucketPolicy(
        self,
        'S3SourceBucketPolicy',
        bucket=s3_source_bucket.bucket_name,
        policy_document=PolicyDocument(statements=[read_statement]))
def __init__(self, scope: core.Construct, id: str, application_prefix: str,
             suffix: str, kda_role: Role, **kwargs):
    """Provision a Cognito-secured Elasticsearch domain plus bootstrap lambdas.

    :param scope: Parent construct (also used for region/account lookups).
    :param id: Construct id.
    :param application_prefix: Prefix for the user/identity pool names, the
        Cognito domain and the Elasticsearch domain name.
    :param suffix: Suffix appended to the Cognito domain to keep it unique.
    :param kda_role: Kinesis Data Analytics role that must be able to call
        the Elasticsearch HTTP API.
    """
    super().__init__(scope, id, **kwargs)
    stack = Stack.of(self)
    region = stack.region

    # Create Cognito User Pool
    self.__user_pool = CfnUserPool(
        scope=self,
        id='UserPool',
        admin_create_user_config={'allowAdminCreateUserOnly': True},
        policies={'passwordPolicy': {
            'minimumLength': 8
        }},
        username_attributes=['email'],
        auto_verified_attributes=['email'],
        user_pool_name=application_prefix + '_user_pool')

    # Create a Cognito User Pool Domain using the newly created Cognito User Pool
    CfnUserPoolDomain(scope=self,
                      id='CognitoDomain',
                      domain=application_prefix + '-' + suffix,
                      user_pool_id=self.user_pool.ref)

    # Create Cognito Identity Pool
    self.__id_pool = CfnIdentityPool(
        scope=self,
        id='IdentityPool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[],
        identity_pool_name=application_prefix + '_identity_pool')

    # Both roles below may only be assumed by authenticated identities of
    # this identity pool.
    trust_relationship = FederatedPrincipal(
        federated='cognito-identity.amazonaws.com',
        conditions={
            'StringEquals': {
                'cognito-identity.amazonaws.com:aud': self.id_pool.ref
            },
            'ForAnyValue:StringLike': {
                'cognito-identity.amazonaws.com:amr': 'authenticated'
            }
        },
        assume_role_action='sts:AssumeRoleWithWebIdentity')
    # IAM role for master user
    master_auth_role = Role(scope=self,
                            id='MasterAuthRole',
                            assumed_by=trust_relationship)
    # Role for authenticated user
    limited_auth_role = Role(scope=self,
                             id='LimitedAuthRole',
                             assumed_by=trust_relationship)

    # Attach Role to Identity Pool
    CfnIdentityPoolRoleAttachment(
        scope=self,
        id='userPoolRoleAttachment',
        identity_pool_id=self.id_pool.ref,
        roles={'authenticated': limited_auth_role.role_arn})
    # Create master-user-group
    CfnUserPoolGroup(scope=self,
                     id='AdminsGroup',
                     user_pool_id=self.user_pool.ref,
                     group_name='master-user-group',
                     role_arn=master_auth_role.role_arn)
    # Create limited-user-group
    CfnUserPoolGroup(scope=self,
                     id='UsersGroup',
                     user_pool_id=self.user_pool.ref,
                     group_name='limited-user-group',
                     role_arn=limited_auth_role.role_arn)

    # Role for the Elasticsearch service to access Cognito
    es_role = Role(scope=self,
                   id='EsRole',
                   assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                   managed_policies=[
                       ManagedPolicy.from_aws_managed_policy_name(
                           'AmazonESCognitoAccess')
                   ])

    # Use the following command line to generate the python dependencies layer content
    # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
    # Build the lambda layer assets
    # NOTE(review): this runs pip at synth time — a side effect of simply
    # constructing the stack.
    subprocess.call([
        'pip', 'install', '-t',
        'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
        '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
        '--upgrade'
    ])
    requirements_layer = _lambda.LayerVersion(
        scope=self,
        id='PythonRequirementsTemplate',
        code=_lambda.Code.from_asset(
            'streaming/streaming_cdk/lambda-layer'),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

    # This lambda function will bootstrap the Elasticsearch cluster
    bootstrap_function_name = 'AESBootstrap'
    register_template_lambda = _lambda.Function(
        scope=self,
        id='RegisterTemplate',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            'streaming/streaming_cdk/bootstrap-lambda'),
        handler='es-bootstrap.lambda_handler',
        environment={
            'REGION': region,
            'KDA_ROLE_ARN': kda_role.role_arn,
            'MASTER_ROLE_ARN': master_auth_role.role_arn
        },
        layers=[requirements_layer],
        timeout=Duration.minutes(15),
        function_name=bootstrap_function_name)
    lambda_role = register_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(service='logs',
                                 resource='log_group',
                                 resource_name='/aws/lambda/' +
                                 bootstrap_function_name + ':*')
            ]))

    # Let the lambda assume the master role so that actions can be executed on the cluster
    # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
    lambda_role.add_to_policy(
        PolicyStatement(actions=['sts:AssumeRole'],
                        resources=[master_auth_role.role_arn]))
    master_auth_role.assume_role_policy.add_statements(
        PolicyStatement(actions=['sts:AssumeRole'],
                        principals=[lambda_role]))

    # List all the roles that are allowed to access the Elasticsearch cluster.
    roles = [
        ArnPrincipal(limited_auth_role.role_arn),
        ArnPrincipal(master_auth_role.role_arn),
        ArnPrincipal(kda_role.role_arn)
    ]  # The users
    if register_template_lambda and register_template_lambda.role:
        roles.append(ArnPrincipal(
            lambda_role.role_arn))  # The lambda used to bootstrap

    # Create kms key
    kms_key = Key(scope=self,
                  id='kms-es',
                  alias='custom/es',
                  description='KMS key for Elasticsearch domain',
                  enable_key_rotation=True)

    # AES Log Groups
    es_app_log_group = logs.LogGroup(scope=self,
                                     id='EsAppLogGroup',
                                     retention=logs.RetentionDays.ONE_WEEK,
                                     removal_policy=RemovalPolicy.RETAIN)

    # Create the Elasticsearch domain
    es_domain_arn = stack.format_arn(service='es',
                                     resource='domain',
                                     resource_name=application_prefix + '/*')
    es_access_policy = PolicyDocument(statements=[
        PolicyStatement(principals=roles,
                        actions=[
                            'es:ESHttpGet', 'es:ESHttpPut', 'es:ESHttpPost',
                            'es:ESHttpDelete'
                        ],
                        resources=[es_domain_arn])
    ])
    self.__es_domain = es.CfnDomain(
        scope=self,
        id='searchDomain',
        elasticsearch_cluster_config={
            'instanceType': 'r5.large.elasticsearch',
            'instanceCount': 2,
            'dedicatedMasterEnabled': True,
            'dedicatedMasterCount': 3,
            'dedicatedMasterType': 'r5.large.elasticsearch',
            'zoneAwarenessEnabled': True,
            'zoneAwarenessConfig': {
                'AvailabilityZoneCount': '2'
            },
        },
        encryption_at_rest_options={
            'enabled': True,
            'kmsKeyId': kms_key.key_id
        },
        node_to_node_encryption_options={'enabled': True},
        ebs_options={
            'volumeSize': 10,
            'ebsEnabled': True
        },
        elasticsearch_version='7.9',
        domain_name=application_prefix,
        access_policies=es_access_policy,
        cognito_options={
            'enabled': True,
            'identityPoolId': self.id_pool.ref,
            'roleArn': es_role.role_arn,
            'userPoolId': self.user_pool.ref
        },
        advanced_security_options={
            'enabled': True,
            'internalUserDatabaseEnabled': False,
            'masterUserOptions': {
                'masterUserArn': master_auth_role.role_arn
            }
        },
        domain_endpoint_options={
            'enforceHttps': True,
            'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
        },
        # log_publishing_options={
        #     'ES_APPLICATION_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
        #     },
        #     'AUDIT_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'SEARCH_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'INDEX_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     }
        # }
    )

    # Not yet on the roadmap...
    # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

    # Deny all roles from the authentication provider - users must be added to groups
    # This lambda function fixes the Cognito identity-pool role mappings.
    cognito_function_name = 'CognitoFix'
    cognito_template_lambda = _lambda.Function(
        scope=self,
        id='CognitoFixLambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            'streaming/streaming_cdk/cognito-lambda'),
        handler='handler.handler',
        environment={
            'REGION': scope.region,
            'USER_POOL_ID': self.__user_pool.ref,
            'IDENTITY_POOL_ID': self.__id_pool.ref,
            'LIMITED_ROLE_ARN': limited_auth_role.role_arn
        },
        timeout=Duration.minutes(15),
        function_name=cognito_function_name)
    lambda_role = cognito_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(service='logs',
                                 resource='log_group',
                                 resource_name='/aws/lambda/' +
                                 cognito_function_name + ':*')
            ]))
    lambda_role.add_to_policy(
        PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                        resources=[self.user_pool.attr_arn]))
    lambda_role.add_to_policy(
        PolicyStatement(actions=['iam:PassRole'],
                        resources=[limited_auth_role.role_arn]))

    cognito_id_res = Fn.join(':', [
        'arn:aws:cognito-identity', scope.region, scope.account,
        Fn.join('/', ['identitypool', self.__id_pool.ref])
    ])
    lambda_role.add_to_policy(
        PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                        resources=[cognito_id_res]))

    # Get the Domain Endpoint and register it with the lambda as environment variable.
    register_template_lambda.add_environment(
        'DOMAIN', self.__es_domain.attr_domain_endpoint)

    CfnOutput(scope=self,
              id='createUserUrl',
              description="Create a new user in the user pool here.",
              value="https://" + scope.region +
              ".console.aws.amazon.com/cognito/users?region=" + scope.region +
              "#/pool/" + self.user_pool.ref + "/users")
    CfnOutput(scope=self,
              id='kibanaUrl',
              description="Access Kibana via this URL.",
              value="https://" + self.__es_domain.attr_domain_endpoint +
              "/_plugin/kibana/")

    # Run the bootstrap and Cognito-fix lambdas as custom resources.
    bootstrap_lambda_provider = Provider(
        scope=self,
        id='BootstrapLambdaProvider',
        on_event_handler=register_template_lambda)
    CustomResource(scope=self,
                   id='ExecuteRegisterTemplate',
                   service_token=bootstrap_lambda_provider.service_token,
                   properties={'Timeout': 900})

    cognito_lambda_provider = Provider(
        scope=self,
        id='CognitoFixLambdaProvider',
        on_event_handler=cognito_template_lambda)
    cognito_fix_resource = CustomResource(
        scope=self,
        id='ExecuteCognitoFix',
        service_token=cognito_lambda_provider.service_token)
    # The fix may only run once the domain (and its Cognito options) exist.
    cognito_fix_resource.node.add_dependency(self.__es_domain)
def add_to_policy(self, actions: List[str], resources: List[str]):
    """Attach an inline ALLOW statement for *actions* over *resources* to this role.

    BUG FIX: the parameter was declared ``resources=List[str]`` — a default
    value of the ``typing`` object instead of a type annotation — so calling
    without ``resources`` passed ``List[str]`` itself to ``PolicyStatement``.

    :param actions: IAM action names to allow.
    :param resources: Resource ARNs the actions apply to.
    """
    self.role.add_to_policy(
        PolicyStatement(actions=actions, resources=resources))