def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    on_event = _lambda.Function(
        self, 'ConnectHandler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.Code.asset('lambda'),
        handler='connect_create.handler',
    )
    on_event.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "connect:CreateInstance",
                "connect:DeleteInstance",
                "ds:CreateAlias",
                "ds:AuthorizeApplication",
                "ds:UnauthorizeApplication",
                "ds:CreateIdentityPoolDirectory",
                "ds:CreateDirectory",
                "ds:DescribeDirectories",
                "ds:CheckAlias",
                "ds:DeleteDirectory",
                "iam:AttachRolePolicy",
                "iam:CreateServiceLinkedRole",
                "iam:PutRolePolicy",
            ],
            resources=["*"]))

    my_provider = cr.Provider(
        self, "MyProvider",
        on_event_handler=on_event,
        # is_complete_handler=is_complete,  # optional async "waiter"
        log_retention=logs.RetentionDays.ONE_DAY)

    CustomResource(self, "Resource1", service_token=my_provider.service_token)
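
# A minimal sketch of what the 'connect_create.handler' on_event handler could
# look like; the handler body is not shown in this snippet, so everything below
# (including the alias property name) is an assumption. The provider framework
# delivers Create/Update/Delete events and expects a PhysicalResourceId back;
# boto3's 'connect' client exposes create_instance/delete_instance.
import boto3

connect = boto3.client('connect')

def handler(event, context):
    request_type = event['RequestType']
    if request_type == 'Create':
        response = connect.create_instance(
            IdentityManagementType='CONNECT_MANAGED',
            # Hypothetical property; the real key would come from the template.
            InstanceAlias=event['ResourceProperties'].get('InstanceAlias', 'my-instance'),
            InboundCallsEnabled=True,
            OutboundCallsEnabled=True)
        return {'PhysicalResourceId': response['Id']}
    if request_type == 'Delete':
        connect.delete_instance(InstanceId=event['PhysicalResourceId'])
    return {'PhysicalResourceId': event['PhysicalResourceId']}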
def create_lambda(self, lb_enginframe):
    # Lambda role
    lambda_role = iam.Role(
        self,
        id="LambdaRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
    lambda_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "acm:ImportCertificate", "acm:ListCertificates",
                "acm:DeleteCertificate", "acm:DescribeCertificate",
                "logs:CreateLogStream", "logs:CreateLogGroup",
                "logs:PutLogEvents"
            ],
            resources=["*"],
        ))

    # Lambda to create the ALB HTTPS certificate
    lambda_cert = _lambda.Function(
        self, "lambda_create_cert",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="cert.lambda_handler",
        code=_lambda.Code.asset("./lambda"),
        timeout=core.Duration.seconds(600),
        role=lambda_role)

    lambda_cs = CustomResource(
        self, "Resource1",
        service_token=lambda_cert.function_arn,
        properties={
            "LoadBalancerDNSName": lb_enginframe.load_balancer_dns_name
        })

    return lambda_cs
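
# Because the custom resource above points straight at the function ARN (no
# provider framework in between), 'cert.lambda_handler' must reply to
# CloudFormation itself by PUT-ting a JSON document to the pre-signed
# ResponseURL. A hedged sketch of that plumbing; the certificate generation
# and the 'CertificateArn' attribute are assumptions.
import json
import urllib.request

def lambda_handler(event, context):
    status, data = 'SUCCESS', {}
    try:
        if event['RequestType'] == 'Create':
            dns_name = event['ResourceProperties']['LoadBalancerDNSName']
            # ... generate a self-signed certificate for dns_name and call
            # acm.import_certificate(...) here; the ARN below is a placeholder.
            data['CertificateArn'] = 'arn:aws:acm:...'
    except Exception:
        status = 'FAILED'
    body = json.dumps({
        'Status': status,
        'Reason': 'See CloudWatch logs',
        'PhysicalResourceId': context.log_stream_name,
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'Data': data,
    }).encode()
    req = urllib.request.Request(event['ResponseURL'], data=body,
                                 headers={'Content-Type': ''}, method='PUT')
    urllib.request.urlopen(req)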
def __init__(
    self,
    scope: core.Construct,
    id: str,
    policy_id: str,
    account_targets: List[str] = None,
    organization_unit_targets: List[str] = None,
) -> None:
    super().__init__(scope, id)

    on_event = _lambda.Function(
        self, "ON-SCP-ATTACHMENT-EVENT",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="app.on_event",
        timeout=core.Duration.seconds(600),
        memory_size=128,
        code=_lambda.Code.asset(os.path.join(dirname, "attachment_lambda")),
        description="Service control policy attachment resource",
    )
    on_event.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "organizations:CreatePolicy",
                "organizations:DeletePolicy",
                "organizations:AttachPolicy",
                "organizations:DetachPolicy",
            ],
            resources=["*"],
        ))

    attachment_provider = cr.Provider(
        self, "ON_EVENT_CUSTOM_RESOURCE_PROVIDER",
        on_event_handler=on_event,
    )

    CustomResource(
        self, "scp-attachment-custom-resource",
        service_token=attachment_provider.service_token,
        properties={
            "PolicyId": policy_id,
            "AccountTargets": account_targets,
            "OrganizationUnitTargets": organization_unit_targets,
        },
    )
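
# A hedged sketch of the 'app.on_event' handler the provider invokes; the real
# attachment_lambda code is not shown here, so the control flow below is an
# assumption. It attaches the SCP to every target on Create and detaches on
# Delete, using the boto3 Organizations API.
import boto3

organizations = boto3.client('organizations')

def on_event(event, context):
    props = event['ResourceProperties']
    policy_id = props['PolicyId']
    targets = (props.get('AccountTargets') or []) + \
              (props.get('OrganizationUnitTargets') or [])
    if event['RequestType'] == 'Create':
        for target_id in targets:
            organizations.attach_policy(PolicyId=policy_id, TargetId=target_id)
    elif event['RequestType'] == 'Delete':
        for target_id in targets:
            organizations.detach_policy(PolicyId=policy_id, TargetId=target_id)
    return {'PhysicalResourceId': f'scp-attachment-{policy_id}'}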
def __init__(self, scope: core.Construct, id: str, f_lambda, bot_locale) -> None:
    super().__init__(scope, id)

    on_event = _lambda.Function(
        self, "ON-EVENT",
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler="lex-bot-provider.on_event",
        timeout=core.Duration.seconds(60),
        memory_size=256,
        code=_lambda.Code.asset("./custom_resource_lex_bot/lambda"),
        description='Processes custom resource events',
        environment={
            'LAMBDA_ARN_FULLFILL': f_lambda.function_arn,
            'BOT_LOCALE': bot_locale
        })

    is_complete = _lambda.Function(
        self, "IS-COMPLETE",
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler="lex-bot-provider.is_complete",
        timeout=core.Duration.seconds(60),
        memory_size=256,
        code=_lambda.Code.asset("./custom_resource_lex_bot/lambda"),
        description='Is-complete handler')

    on_event.add_to_role_policy(
        iam.PolicyStatement(actions=["lex:*"], resources=['*']))
    is_complete.add_to_role_policy(
        iam.PolicyStatement(actions=["lex:*"], resources=['*']))

    my_provider = cr.Provider(
        self, "ON_EVENT_CUSTOM_RESOURCE_PROVIDER",
        on_event_handler=on_event,
        is_complete_handler=is_complete,  # optional async "waiter"
        log_retention=logs.RetentionDays.ONE_DAY)

    CustomResource(self, "lexbotcustom", service_token=my_provider.service_token)
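
# The on_event/is_complete pair above uses the provider framework's async
# "waiter" pattern: on_event kicks off bot creation, then is_complete is polled
# until it returns IsComplete=True. A hedged sketch of the polling half; the
# bot name is a placeholder, and the real handler would derive it from the
# event. The v1 'lex-models' API exposes get_bot with a status field.
import boto3

lex = boto3.client('lex-models')

def is_complete(event, context):
    # Hypothetical bot name; the real one would come from the event or env.
    bot = lex.get_bot(name='MyBot', versionOrAlias='$LATEST')
    status = bot['status']
    if status == 'FAILED':
        raise RuntimeError(bot.get('failureReason', 'bot build failed'))
    return {'IsComplete': status == 'READY'}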
def __init__(self, scope: core.Construct, id: str, application_prefix: str,
             suffix: str, kda_role: Role, **kwargs):
    super().__init__(scope, id, **kwargs)
    stack = Stack.of(self)
    region = stack.region

    # Create Cognito User Pool
    self.__user_pool = CfnUserPool(
        scope=self,
        id='UserPool',
        admin_create_user_config={'allowAdminCreateUserOnly': True},
        policies={'passwordPolicy': {'minimumLength': 8}},
        username_attributes=['email'],
        auto_verified_attributes=['email'],
        user_pool_name=application_prefix + '_user_pool')

    # Create a Cognito User Pool Domain using the newly created Cognito User Pool
    CfnUserPoolDomain(scope=self,
                      id='CognitoDomain',
                      domain=application_prefix + '-' + suffix,
                      user_pool_id=self.user_pool.ref)

    # Create Cognito Identity Pool
    self.__id_pool = CfnIdentityPool(
        scope=self,
        id='IdentityPool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[],
        identity_pool_name=application_prefix + '_identity_pool')

    trust_relationship = FederatedPrincipal(
        federated='cognito-identity.amazonaws.com',
        conditions={
            'StringEquals': {
                'cognito-identity.amazonaws.com:aud': self.id_pool.ref
            },
            'ForAnyValue:StringLike': {
                'cognito-identity.amazonaws.com:amr': 'authenticated'
            }
        },
        assume_role_action='sts:AssumeRoleWithWebIdentity')

    # IAM role for the master user
    master_auth_role = Role(scope=self,
                            id='MasterAuthRole',
                            assumed_by=trust_relationship)
    # Role for authenticated users
    limited_auth_role = Role(scope=self,
                             id='LimitedAuthRole',
                             assumed_by=trust_relationship)

    # Attach the roles to the Identity Pool
    CfnIdentityPoolRoleAttachment(
        scope=self,
        id='userPoolRoleAttachment',
        identity_pool_id=self.id_pool.ref,
        roles={'authenticated': limited_auth_role.role_arn})

    # Create master-user-group
    CfnUserPoolGroup(scope=self,
                     id='AdminsGroup',
                     user_pool_id=self.user_pool.ref,
                     group_name='master-user-group',
                     role_arn=master_auth_role.role_arn)

    # Create limited-user-group
    CfnUserPoolGroup(scope=self,
                     id='UsersGroup',
                     user_pool_id=self.user_pool.ref,
                     group_name='limited-user-group',
                     role_arn=limited_auth_role.role_arn)

    # Role for the Elasticsearch service to access Cognito
    es_role = Role(scope=self,
                   id='EsRole',
                   assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                   managed_policies=[
                       ManagedPolicy.from_aws_managed_policy_name(
                           'AmazonESCognitoAccess')
                   ])

    # Use the following command line to generate the python dependencies layer content
    # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
    # Build the lambda layer assets
    subprocess.call([
        'pip', 'install', '-t',
        'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
        '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
        '--upgrade'
    ])

    requirements_layer = _lambda.LayerVersion(
        scope=self,
        id='PythonRequirementsTemplate',
        code=_lambda.Code.from_asset('streaming/streaming_cdk/lambda-layer'),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

    # This lambda function will bootstrap the Elasticsearch cluster
    bootstrap_function_name = 'AESBootstrap'
    register_template_lambda = _lambda.Function(
        scope=self,
        id='RegisterTemplate',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('streaming/streaming_cdk/bootstrap-lambda'),
        handler='es-bootstrap.lambda_handler',
        environment={
            'REGION': region,
            'KDA_ROLE_ARN': kda_role.role_arn,
            'MASTER_ROLE_ARN': master_auth_role.role_arn
        },
        layers=[requirements_layer],
        timeout=Duration.minutes(15),
        function_name=bootstrap_function_name)

    lambda_role = register_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(service='logs',
                                 resource='log_group',
                                 resource_name='/aws/lambda/' +
                                 bootstrap_function_name + ':*')
            ]))

    # Let the lambda assume the master role so that actions can be executed on the cluster
    # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
    lambda_role.add_to_policy(
        PolicyStatement(actions=['sts:AssumeRole'],
                        resources=[master_auth_role.role_arn]))
    master_auth_role.assume_role_policy.add_statements(
        PolicyStatement(actions=['sts:AssumeRole'],
                        principals=[lambda_role]))

    # List all the roles that are allowed to access the Elasticsearch cluster.
    roles = [
        ArnPrincipal(limited_auth_role.role_arn),
        ArnPrincipal(master_auth_role.role_arn),
        ArnPrincipal(kda_role.role_arn)
    ]  # The users
    if register_template_lambda and register_template_lambda.role:
        roles.append(ArnPrincipal(
            lambda_role.role_arn))  # The lambda used to bootstrap

    # Create the KMS key
    kms_key = Key(scope=self,
                  id='kms-es',
                  alias='custom/es',
                  description='KMS key for Elasticsearch domain',
                  enable_key_rotation=True)

    # AES Log Groups
    es_app_log_group = logs.LogGroup(scope=self,
                                     id='EsAppLogGroup',
                                     retention=logs.RetentionDays.ONE_WEEK,
                                     removal_policy=RemovalPolicy.RETAIN)

    # Create the Elasticsearch domain
    es_domain_arn = stack.format_arn(service='es',
                                     resource='domain',
                                     resource_name=application_prefix + '/*')

    es_access_policy = PolicyDocument(statements=[
        PolicyStatement(principals=roles,
                        actions=[
                            'es:ESHttpGet', 'es:ESHttpPut',
                            'es:ESHttpPost', 'es:ESHttpDelete'
                        ],
                        resources=[es_domain_arn])
    ])
    self.__es_domain = es.CfnDomain(
        scope=self,
        id='searchDomain',
        elasticsearch_cluster_config={
            'instanceType': 'r5.large.elasticsearch',
            'instanceCount': 2,
            'dedicatedMasterEnabled': True,
            'dedicatedMasterCount': 3,
            'dedicatedMasterType': 'r5.large.elasticsearch',
            'zoneAwarenessEnabled': True,
            'zoneAwarenessConfig': {
                'availabilityZoneCount': 2
            },
        },
        encryption_at_rest_options={
            'enabled': True,
            'kmsKeyId': kms_key.key_id
        },
        node_to_node_encryption_options={'enabled': True},
        ebs_options={
            'volumeSize': 10,
            'ebsEnabled': True
        },
        elasticsearch_version='7.9',
        domain_name=application_prefix,
        access_policies=es_access_policy,
        cognito_options={
            'enabled': True,
            'identityPoolId': self.id_pool.ref,
            'roleArn': es_role.role_arn,
            'userPoolId': self.user_pool.ref
        },
        advanced_security_options={
            'enabled': True,
            'internalUserDatabaseEnabled': False,
            'masterUserOptions': {
                'masterUserArn': master_auth_role.role_arn
            }
        },
        domain_endpoint_options={
            'enforceHttps': True,
            'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
        },
        # log_publishing_options={
        #     'ES_APPLICATION_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
        #     },
        #     'AUDIT_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'SEARCH_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'INDEX_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     }
        # }
    )

    # Not yet on the roadmap...
    # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

    # Deny all roles from the authentication provider - users must be added to groups
    # This lambda function will fix the Cognito configuration
    cognito_function_name = 'CognitoFix'
    cognito_template_lambda = _lambda.Function(
        scope=self,
        id='CognitoFixLambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('streaming/streaming_cdk/cognito-lambda'),
        handler='handler.handler',
        environment={
            'REGION': region,
            'USER_POOL_ID': self.__user_pool.ref,
            'IDENTITY_POOL_ID': self.__id_pool.ref,
            'LIMITED_ROLE_ARN': limited_auth_role.role_arn
        },
        timeout=Duration.minutes(15),
        function_name=cognito_function_name)

    lambda_role = cognito_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(service='logs',
                                 resource='log_group',
                                 resource_name='/aws/lambda/' +
                                 cognito_function_name + ':*')
            ]))
    lambda_role.add_to_policy(
        PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                        resources=[self.user_pool.attr_arn]))
    lambda_role.add_to_policy(
        PolicyStatement(actions=['iam:PassRole'],
                        resources=[limited_auth_role.role_arn]))

    cognito_id_res = Fn.join(':', [
        'arn:aws:cognito-identity', stack.region, stack.account,
        Fn.join('/', ['identitypool', self.__id_pool.ref])
    ])
    lambda_role.add_to_policy(
        PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                        resources=[cognito_id_res]))

    # Get the domain endpoint and register it with the lambda as an environment variable.
    register_template_lambda.add_environment(
        'DOMAIN', self.__es_domain.attr_domain_endpoint)

    CfnOutput(scope=self,
              id='createUserUrl',
              description="Create a new user in the user pool here.",
              value="https://" + region +
              ".console.aws.amazon.com/cognito/users?region=" + region +
              "#/pool/" + self.user_pool.ref + "/users")

    CfnOutput(scope=self,
              id='kibanaUrl',
              description="Access Kibana via this URL.",
              value="https://" + self.__es_domain.attr_domain_endpoint +
              "/_plugin/kibana/")

    bootstrap_lambda_provider = Provider(
        scope=self,
        id='BootstrapLambdaProvider',
        on_event_handler=register_template_lambda)

    CustomResource(scope=self,
                   id='ExecuteRegisterTemplate',
                   service_token=bootstrap_lambda_provider.service_token,
                   properties={'Timeout': 900})

    cognito_lambda_provider = Provider(
        scope=self,
        id='CognitoFixLambdaProvider',
        on_event_handler=cognito_template_lambda)
    cognito_fix_resource = CustomResource(
        scope=self,
        id='ExecuteCognitoFix',
        service_token=cognito_lambda_provider.service_token)
    cognito_fix_resource.node.add_dependency(self.__es_domain)
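
# A hedged sketch of what 'es-bootstrap.lambda_handler' might do with the
# MASTER_ROLE_ARN / DOMAIN environment variables wired up above: assume the
# master role and issue a signed request against the domain. The library
# choices (requests, requests-aws4auth) and the template name are assumptions
# about what the layer's requirements.txt bundles.
import os
import boto3
import requests
from requests_aws4auth import AWS4Auth

def lambda_handler(event, context):
    creds = boto3.client('sts').assume_role(
        RoleArn=os.environ['MASTER_ROLE_ARN'],
        RoleSessionName='es-bootstrap')['Credentials']
    awsauth = AWS4Auth(creds['AccessKeyId'], creds['SecretAccessKey'],
                       os.environ['REGION'], 'es',
                       session_token=creds['SessionToken'])
    # Example bootstrap action: register an index template (name/body assumed).
    requests.put(
        f"https://{os.environ['DOMAIN']}/_template/example-template",
        auth=awsauth,
        json={'index_patterns': ['example-*']},
        timeout=30)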
def __init__(self,
             scope: Construct,
             id: str,
             elasticsearch_index: ElasticsearchIndexResource,
             dynamodb_table: Table,
             kms_key: Optional[Key] = None,
             *,
             sagemaker_endpoint_name: str = None,
             sagemaker_endpoint_arn: str = None,
             sagemaker_embeddings_key: str = None) -> None:
    super().__init__(scope=scope, id=id)

    elasticsearch_layer = BElasticsearchLayer(
        scope=self, name=f"{id}ElasticsearchLayer")

    if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
        raise ValueError(
            f'In order to use sentence embedding, all of the following environment variables are required: '
            f'SAGEMAKER_ENDPOINT_NAME, SAGEMAKER_EMBEDDINGS_KEY. '
            f'Otherwise, provide none of the above.')

    if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
        sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn('*')

    optional_sagemaker_parameters = {
        'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
        'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
    }

    initial_cloner_function = SingletonFunction(
        scope=self,
        id='InitialClonerFunction',
        uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
        lambda_purpose='InitialClonerSingletonLambda',
        code=Code.from_asset(initial_cloner_root),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        timeout=Duration.minutes(15),
        role=Role(
            scope=self,
            id='InitialClonerFunctionRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'LogsPolicy':
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            'logs:CreateLogGroup',
                            'logs:CreateLogStream',
                            'logs:PutLogEvents',
                            'logs:DescribeLogStreams',
                        ],
                        resources=['arn:aws:logs:*:*:*'],
                        effect=Effect.ALLOW,
                    )
                ]),
                'ElasticsearchPolicy':
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            'es:ESHttpDelete',
                            'es:ESHttpGet',
                            'es:ESHttpHead',
                            'es:ESHttpPatch',
                            'es:ESHttpPost',
                            'es:ESHttpPut',
                        ],
                        resources=['*'],
                        effect=Effect.ALLOW,
                    )
                ]),
                'DynamodbPolicy':
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=['dynamodb:*'],
                        resources=['*'],
                        effect=Effect.ALLOW,
                    )
                ]),
            },
            description='Role for DynamoDB Initial Cloner Function',
        ),
    )

    if kms_key:
        initial_cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=['kms:Decrypt'],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ),
        )

    initial_cloner = CustomResource(
        scope=self,
        id='InitialCloner',
        service_token=initial_cloner_function.function_arn,
        removal_policy=RemovalPolicy.DESTROY,
        properties={
            'DynamodbTableName': dynamodb_table.table_name,
            'ElasticsearchIndexName': elasticsearch_index.index_name,
            'ElasticsearchEndpoint':
            elasticsearch_index.elasticsearch_domain.domain_endpoint,
        },
        resource_type='Custom::ElasticsearchInitialCloner',
    )
    primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

    dynamodb_stream_arn = dynamodb_table.table_stream_arn
    if not dynamodb_stream_arn:
        raise Exception('DynamoDB streams must be enabled for the table')

    dynamodb_event_source = DynamoEventSource(
        table=dynamodb_table,
        starting_position=StartingPosition.LATEST,
        enabled=True,
        max_batching_window=Duration.seconds(10),
        bisect_batch_on_error=True,
        parallelization_factor=2,
        batch_size=1000,
        retry_attempts=10,
    )

    cloner_inline_policies = {
        'LogsPolicy':
        PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'logs:CreateLogGroup',
                    'logs:CreateLogStream',
                    'logs:PutLogEvents',
                    'logs:DescribeLogStreams',
                ],
                resources=['arn:aws:logs:*:*:*'],
                effect=Effect.ALLOW,
            )
        ]),
        'ElasticsearchPolicy':
        PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'es:ESHttpDelete',
                    'es:ESHttpGet',
                    'es:ESHttpHead',
                    'es:ESHttpPatch',
                    'es:ESHttpPost',
                    'es:ESHttpPut',
                ],
                resources=[
                    f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'
                ],
                effect=Effect.ALLOW,
            )
        ]),
        'DynamodbStreamsPolicy':
        PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'dynamodb:DescribeStream',
                    'dynamodb:GetRecords',
                    'dynamodb:GetShardIterator',
                    'dynamodb:ListStreams',
                ],
                resources=[dynamodb_stream_arn],
                effect=Effect.ALLOW,
            )
        ]),
    }

    if sagemaker_endpoint_arn:
        cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(statements=[
            PolicyStatement(actions=['sagemaker:InvokeEndpoint'],
                            resources=[sagemaker_endpoint_arn],
                            effect=Effect.ALLOW)
        ])

    cloner_function = Function(
        scope=self,
        id='ClonerFunction',
        code=Code.from_asset(cloner_root),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        environment={
            'ES_INDEX_NAME': elasticsearch_index.index_name,
            'ES_DOMAIN_ENDPOINT':
            elasticsearch_index.elasticsearch_domain.domain_endpoint,
            'PRIMARY_KEY_FIELD': primary_key_field,
            # Only pass the SageMaker variables when both are set.
            **{
                k: v
                for k, v in optional_sagemaker_parameters.items()
                if all(optional_sagemaker_parameters.values())
            }
        },
        events=[dynamodb_event_source],
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        role=Role(
            scope=self,
            id='ClonerFunctionRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            inline_policies=cloner_inline_policies,
            description='Role for DynamoDB Cloner Function',
        ),
        timeout=Duration.seconds(30),
    )

    if kms_key:
        cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=['kms:Decrypt'],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ))
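
# The construct above calls self.__resolve_sagemaker_endpoints_arn('*') without
# showing it; a plausible sketch is a format_arn helper scoped to the stack's
# partition, account, and region. The method body below is an assumption, not
# the library's actual implementation.
def __resolve_sagemaker_endpoints_arn(self, endpoint_name: str) -> str:
    # Builds e.g. arn:aws:sagemaker:<region>:<account>:endpoint/<endpoint_name>
    return Stack.of(self).format_arn(
        service='sagemaker',
        resource='endpoint',
        resource_name=endpoint_name)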
def __init__(self, scope: Construct, id: str, vpc: _ec2.IVpc,
             codebucket: IBucket, s3_deploy, metrics) -> None:
    super().__init__(scope, id)

    self._metrics_mapping = CfnMapping(
        self,
        'AnonymousData',
        mapping={'SendAnonymousData': {'Data': 'Yes'}})
    self._metrics_condition = CfnCondition(
        self,
        'AnonymousDatatoAWS',
        expression=Fn.condition_equals(
            self._metrics_mapping.find_in_map('SendAnonymousData', 'Data'),
            'Yes'))

    self._helper_func = _lambda.SingletonFunction(
        self,
        'SolutionHelper',
        uuid='75248a81-9138-468c-9ba1-bca6c7137599',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='lambda_function.handler',
        description='This function generates UUID for each deployment and sends anonymous data to the AWS Solutions team',
        code=_lambda.Code.from_bucket(bucket=codebucket,
                                      key='app_code/solution_helper.zip'),
        vpc=vpc,
        timeout=Duration.seconds(30))
    self._helper_func.add_dependency(s3_deploy)

    self._lambda_provider = _custom_resources.Provider(
        self,
        'LambdaProvider',
        on_event_handler=self._helper_func,
        vpc=vpc)

    self._uuid = CustomResource(
        self,
        'UUIDCustomResource',
        service_token=self._lambda_provider.service_token,
        properties={"Resource": "UUID"},
        resource_type="Custom::CreateUUID",
        removal_policy=RemovalPolicy.DESTROY)

    self._send_data = CustomResource(
        self,
        'SendDataCustomResource',
        service_token=self._lambda_provider.service_token,
        properties={
            "Resource": "AnonymousMetric",
            "UUID": self._uuid.get_att_string("UUID"),
            "Solution": metrics["Solution"],
            "Data": metrics
        },
        resource_type='Custom::AnonymousData',
        removal_policy=RemovalPolicy.DESTROY)
    self._send_data.node.add_dependency(self._uuid)

    Aspects.of(self._helper_func).add(Condition(self._metrics_condition))
    Aspects.of(self._uuid).add(Condition(self._metrics_condition))
    Aspects.of(self._send_data).add(Condition(self._metrics_condition))
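
# A hedged sketch of the 'lambda_function.handler' behind solution_helper.zip:
# it answers the two resource types used above, returning a fresh UUID for
# Custom::CreateUUID and forwarding metrics for Custom::AnonymousData. The
# metrics endpoint URL and the payload shape are assumptions.
import json
import uuid
import urllib.request

def handler(event, context):
    if event['RequestType'] != 'Create':
        return {}
    resource = event['ResourceProperties']['Resource']
    if resource == 'UUID':
        # Surfaces as get_att_string("UUID") on the custom resource.
        return {'Data': {'UUID': str(uuid.uuid4())}}
    if resource == 'AnonymousMetric':
        payload = json.dumps(event['ResourceProperties']).encode()
        req = urllib.request.Request(
            'https://metrics.awssolutionsbuilder.com/generic',  # assumed URL
            data=payload,
            headers={'Content-Type': 'application/json'})
        urllib.request.urlopen(req)
    return {}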
def __init__(
    self,
    scope: Construct,
    id: str,
    elasticsearch_index: ElasticsearchIndexResource,
    dynamodb_table: Table,
    kms_key: Optional[Key] = None,
) -> None:
    super().__init__(scope=scope, id=id)

    elasticsearch_layer = BElasticsearchLayer(
        scope=self, name=f"{id}ElasticsearchLayer")

    initial_cloner_function = SingletonFunction(
        scope=self,
        id="InitialClonerFunction",
        uuid="e01116a4-f939-43f2-8f5b-cc9f862c9e01",
        lambda_purpose="InitialClonerSingletonLambda",
        code=Code.from_asset(initial_cloner_root),
        handler="index.handler",
        runtime=Runtime.PYTHON_3_8,
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        timeout=Duration.minutes(15),
        role=Role(
            scope=self,
            id="InitialClonerFunctionRole",
            assumed_by=ServicePrincipal("lambda.amazonaws.com"),
            inline_policies={
                "LogsPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                            "logs:DescribeLogStreams",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                        effect=Effect.ALLOW,
                    )
                ]),
                "ElasticsearchPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "es:ESHttpDelete",
                            "es:ESHttpGet",
                            "es:ESHttpHead",
                            "es:ESHttpPatch",
                            "es:ESHttpPost",
                            "es:ESHttpPut",
                        ],
                        resources=["*"],
                        effect=Effect.ALLOW,
                    )
                ]),
                "DynamodbPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=["dynamodb:*"],
                        resources=["*"],
                        effect=Effect.ALLOW,
                    )
                ]),
            },
            description="Role for DynamoDB Initial Cloner Function",
        ),
    )

    if kms_key:
        initial_cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=["kms:Decrypt"],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ),
        )

    initial_cloner = CustomResource(
        scope=self,
        id="InitialCloner",
        service_token=initial_cloner_function.function_arn,
        removal_policy=RemovalPolicy.DESTROY,
        properties={
            "DynamodbTableName": dynamodb_table.table_name,
            "ElasticsearchIndexName": elasticsearch_index.index_name,
            "ElasticsearchEndpoint":
            elasticsearch_index.elasticsearch_domain.domain_endpoint,
        },
        resource_type="Custom::ElasticsearchInitialCloner",
    )
    primary_key_field = initial_cloner.get_att_string("PrimaryKeyField")

    dynamodb_stream_arn = dynamodb_table.table_stream_arn
    if not dynamodb_stream_arn:
        raise Exception("DynamoDB streams must be enabled for the table")

    dynamodb_event_source = DynamoEventSource(
        table=dynamodb_table,
        starting_position=StartingPosition.LATEST,
        enabled=True,
        max_batching_window=Duration.seconds(10),
        bisect_batch_on_error=True,
        parallelization_factor=2,
        batch_size=1000,
        retry_attempts=10,
    )

    cloner_function = Function(
        scope=self,
        id="ClonerFunction",
        code=Code.from_asset(cloner_root),
        handler="index.handler",
        runtime=Runtime.PYTHON_3_8,
        environment={
            "ES_INDEX_NAME": elasticsearch_index.index_name,
            "ES_DOMAIN_ENDPOINT":
            elasticsearch_index.elasticsearch_domain.domain_endpoint,
            "PRIMARY_KEY_FIELD": primary_key_field,
        },
        events=[dynamodb_event_source],
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        role=Role(
            scope=self,
            id="ClonerFunctionRole",
            assumed_by=ServicePrincipal("lambda.amazonaws.com"),
            inline_policies={
                "LogsPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                            "logs:DescribeLogStreams",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                        effect=Effect.ALLOW,
                    )
                ]),
                "ElasticsearchPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "es:ESHttpDelete",
                            "es:ESHttpGet",
                            "es:ESHttpHead",
                            "es:ESHttpPatch",
                            "es:ESHttpPost",
                            "es:ESHttpPut",
                        ],
                        resources=[
                            f"{elasticsearch_index.elasticsearch_domain.domain_arn}/*"
                        ],
                        effect=Effect.ALLOW,
                    )
                ]),
                "DynamodbStreamsPolicy":
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "dynamodb:DescribeStream",
                            "dynamodb:GetRecords",
                            "dynamodb:GetShardIterator",
                            "dynamodb:ListStreams",
                        ],
                        resources=[dynamodb_stream_arn],
                        effect=Effect.ALLOW,
                    )
                ]),
            },
            description="Role for DynamoDB Cloner Function",
        ),
        timeout=Duration.seconds(30),
    )

    if kms_key:
        cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=["kms:Decrypt"],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ),
        )
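
# The stream check above means callers must create the table with streams
# enabled before instantiating this construct. A minimal usage sketch; the
# construct name, the surrounding 'stack'/'index' variables, and the
# aws_dynamodb imports (Table, Attribute, AttributeType, StreamViewType) are
# assumptions.
table = Table(
    scope=stack,
    id="ItemsTable",
    partition_key=Attribute(name="id", type=AttributeType.STRING),
    stream=StreamViewType.NEW_AND_OLD_IMAGES,  # required by the cloner
)
CloneDynamodbToElasticsearch(  # hypothetical name for the construct above
    scope=stack,
    id="Cloner",
    elasticsearch_index=index,
    dynamodb_table=table,
)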
def __init__(self, scope: core.Construct, id: str, vpc: _ec2.Vpc,
             redshift_secret_arn: str, lambda_sg: _ec2.SecurityGroup,
             clean_glue_db: _glue.Database, redshift_role_arn: str,
             redshift_cluster_endpoint: _redshift.Endpoint, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.__vpc = vpc
    self.__redshift_secret_arn = redshift_secret_arn
    self.__lambda_sg = lambda_sg
    self.__clean_glue_db = clean_glue_db
    self.__redshift_role_arn = redshift_role_arn
    stack = Stack.of(self)

    # Generate secrets for the Redshift users
    self.__etl_user_secret = Secret(
        scope=self,
        id='ETLUserSecret',
        description="ETL user for Redshift",
        generate_secret_string=SecretStringGenerator(
            exclude_characters="'",
            exclude_punctuation=True,
            generate_string_key="password",
            secret_string_template=json.dumps({
                'username': _config.Redshift.ETL_USER,
                'dbname': _config.Redshift.DATABASE,
                'host': redshift_cluster_endpoint.hostname,
                'port': core.Token.as_string(redshift_cluster_endpoint.port)
            })))
    self.__dataengineer_user_secret = Secret(
        scope=self,
        id='DataEngineerUserSecret',
        description="Data engineer user for Redshift",
        generate_secret_string=SecretStringGenerator(
            exclude_characters="'",
            exclude_punctuation=True,
            generate_string_key="password",
            secret_string_template=json.dumps({
                'username': _config.Redshift.DATA_ENGINEER_USER,
                'dbname': _config.Redshift.DATABASE,
                'host': redshift_cluster_endpoint.hostname,
                'port': core.Token.as_string(redshift_cluster_endpoint.port)
            })))
    self.__quicksight_user_secret = Secret(
        scope=self,
        id='DatavizUserSecret',
        description="QuickSight user for Redshift",
        generate_secret_string=SecretStringGenerator(
            exclude_characters="'",
            exclude_punctuation=True,
            generate_string_key="password",
            secret_string_template=json.dumps({
                'username': _config.Redshift.DATAVIZ_USER,
                'dbname': _config.Redshift.DATABASE,
                'host': redshift_cluster_endpoint.hostname,
                'port': core.Token.as_string(redshift_cluster_endpoint.port)
            })))

    self.__subnets_selection = _ec2.SubnetSelection(
        subnet_type=_ec2.SubnetType.PRIVATE)

    # Use the following command line to generate the python dependencies layer content
    # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
    # Build the lambda layer assets
    subprocess.call([
        'pip', 'install', '-t',
        'dwh/dwh_cdk/bootstrap_lambda_layer/python/lib/python3.8/site-packages',
        '-r', 'dwh/dwh_cdk/bootstrap_lambda/requirements.txt',
        '--platform', 'manylinux1_x86_64', '--only-binary=:all:', '--upgrade'
    ])

    requirements_layer = _lambda.LayerVersion(
        scope=self,
        id='PythonRequirementsTemplate',
        code=_lambda.Code.from_asset('dwh/dwh_cdk/bootstrap_lambda_layer'),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

    # This lambda function will run SQL commands to set up Redshift users and tables
    bootstrap_function_name = 'RedshiftBootstrap'
    register_template_lambda = _lambda.Function(
        scope=self,
        id='RegisterTemplate',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('dwh/dwh_cdk/bootstrap_lambda'),
        handler='redshift_setup.handler',
        environment={
            'SQL_SCRIPT_LOCATION': _config.BINARIES_LOCATION + self.SQL_SCRIPT_DIR,
            'SECRET_ARN': self.__redshift_secret_arn,
            'SQL_SCRIPT_FILES': _config.RedshiftDeploy.SQL_SCRIPT_FILES,
            'ETL_SECRET': self.__etl_user_secret.secret_arn,
            'DATAENG_SECRET': self.__dataengineer_user_secret.secret_arn,
            'DATAVIZ_SECRET': self.__quicksight_user_secret.secret_arn,
            'GLUE_DATABASE': self.__clean_glue_db.database_name,
            'REDSHIFT_IAM_ROLE': self.__redshift_role_arn
        },
        layers=[requirements_layer],
        timeout=core.Duration.minutes(3),
        vpc=self.__vpc,
        vpc_subnets=self.__subnets_selection,
        security_group=self.__lambda_sg,
        function_name=bootstrap_function_name,
        memory_size=256)

    lambda_role = register_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=[
                'secretsmanager:GetResourcePolicy',
                'secretsmanager:GetSecretValue',
                'secretsmanager:DescribeSecret',
                'secretsmanager:ListSecretVersionIds'
            ],
            resources=[stack.format_arn(service='secretsmanager', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(service='logs',
                                 resource='log_group',
                                 resource_name='/aws/lambda/' +
                                 bootstrap_function_name + ':*')
            ]))

    artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['s3:GetObject', 's3:GetObjectVersion'],
            resources=[artifacts_bucket_arn, artifacts_bucket_arn + '/binaries/*']))

    bootstrap_lambda_provider = Provider(
        scope=self,
        id='BootstrapLambdaProvider',
        on_event_handler=register_template_lambda)

    CustomResource(scope=self,
                   id='ExecuteRegisterTemplate',
                   service_token=bootstrap_lambda_provider.service_token)

    self.__secrets_manager_vpc_endpoint_sg = _ec2.SecurityGroup(
        self,
        id="secrets_manager_vpc_endpoint-sg",
        vpc=self.__vpc,
        security_group_name="secrets-manager-vpc_endpoint-sg")
    self.__secrets_manager_vpc_endpoint_sg.add_ingress_rule(
        self.__lambda_sg, _ec2.Port.all_tcp())
    self.__security_groups_list = [self.__secrets_manager_vpc_endpoint_sg]

    self.__endpoint_service_name = 'com.amazonaws.%s.secretsmanager' % stack.region
    # Create a VPC endpoint for Secrets Manager
    secrets_manager_vpc_endpoint = _ec2.InterfaceVpcEndpoint(
        stack,
        "Secretsmanager VPC Endpoint",
        vpc=self.__vpc,
        service=_ec2.InterfaceVpcEndpointService(
            self.__endpoint_service_name, 443),
        subnets=self.__subnets_selection,
        security_groups=self.__security_groups_list)
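
# A hedged sketch of 'redshift_setup.handler': fetch the admin credentials from
# Secrets Manager, pull the SQL scripts from S3, and execute them against the
# cluster. The psycopg2 dependency, the secret's key names, and the
# SQL_SCRIPT_LOCATION being an s3://bucket/prefix URI are all assumptions
# about what the layer and config provide.
import json
import os
import boto3
import psycopg2

def handler(event, context):
    if event['RequestType'] != 'Create':
        return {}
    secret = json.loads(boto3.client('secretsmanager').get_secret_value(
        SecretId=os.environ['SECRET_ARN'])['SecretString'])
    conn = psycopg2.connect(host=secret['host'],
                            port=secret['port'],
                            dbname=secret['dbname'],
                            user=secret['username'],
                            password=secret['password'])
    conn.autocommit = True
    s3 = boto3.client('s3')
    bucket, _, prefix = os.environ['SQL_SCRIPT_LOCATION'].replace(
        's3://', '').partition('/')
    with conn.cursor() as cur:
        for script in os.environ['SQL_SCRIPT_FILES'].split(','):
            body = s3.get_object(Bucket=bucket,
                                 Key=f'{prefix}/{script}')['Body'].read()
            cur.execute(body.decode())
    conn.close()
    return {}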