def __init__(self, scope: core.Construct, id: str) -> None:
    """Create the Kesher Cognito user pool plus an app client.

    Emits three CloudFormation outputs with pinned logical IDs so other
    stacks/tools can look them up by name: KesherUserPoolID,
    KesherUserPoolArn and AuthClientID.
    """
    super().__init__(scope, id)
    # Users sign in with their username only (no email/phone aliases).
    self.user_pool = aws_cognito.UserPool(
        self, "UsersPool",
        sign_in_aliases=aws_cognito.SignInAliases(username=True))
    # The high-level UserPool construct does not expose the full password
    # policy here, so drop to the underlying CfnUserPool (L1 resource) and
    # relax everything except the 8-character minimum.
    cfn_user_pool: aws_cognito.CfnUserPool = self.user_pool.node.default_child
    cfn_user_pool.policies = aws_cognito.CfnUserPool.PoliciesProperty(
        password_policy=aws_cognito.CfnUserPool.PasswordPolicyProperty(
            minimum_length=8,
            require_lowercase=False,
            require_numbers=False,
            require_symbols=False,
            require_uppercase=False))
    # override_logical_id pins the CloudFormation logical ID so the output
    # name stays stable across synths.
    user_pool_output = core.CfnOutput(self, id="KesherUserPoolID",
                                      value=self.user_pool.user_pool_id)
    user_pool_output.override_logical_id("KesherUserPoolID")
    user_pool_arn_output = core.CfnOutput(
        self, id="KesherUserPoolArn",
        value=self.user_pool.user_pool_arn)
    user_pool_arn_output.override_logical_id("KesherUserPoolArn")
    # App client restricted to the USER_PASSWORD_AUTH flow (admin flow off).
    # NOTE(review): AuthFlow is referenced unqualified while the rest of the
    # block uses the aws_cognito prefix -- presumably imported directly at
    # the top of the file; confirm against the imports.
    self.user_pool_client = aws_cognito.UserPoolClient(
        self, "PoolClient",
        user_pool=self.user_pool,
        auth_flows=AuthFlow(admin_user_password=False, user_password=True),
    )
    auth_client_output = core.CfnOutput(
        self, id="AuthClientID",
        value=self.user_pool_client.user_pool_client_id)
    auth_client_output.override_logical_id("AuthClientID")
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Create the Mysfits Cognito user pool and its app client.

    The pool is exposed as ``self.user_pool``; the pool and client IDs are
    emitted as plain CloudFormation outputs.
    """
    super().__init__(scope, construct_id, **kwargs)
    # Open self sign-up; e-mail addresses are auto-verified on registration.
    self.user_pool = cognito.UserPool(
        self, 'UserPool',
        auto_verify=cognito.AutoVerifiedAttrs(email=True),
        self_sign_up_enabled=True,
        user_pool_name='MysfitsUserPool')
    user_pool_client = cognito.UserPoolClient(
        self, 'UserPoolClient',
        user_pool=self.user_pool,
        user_pool_client_name='MysfitsUserPoolClient')
    core.CfnOutput(self, 'CognitoUserPool',
                   description='The Cognito User Pool',
                   value=self.user_pool.user_pool_id)
    # Earlier variant kept for reference: exported the pool id for
    # cross-stack import instead of a plain output.
    # self.user_pool_id = core.CfnOutput(
    #     self, "CognitoUserPool",
    #     description='The Cognito User Pool',
    #     value=user_pool.user_pool_id,
    #     export_name="user-pool-id"
    # ).import_value
    core.CfnOutput(self, 'CognitoUserPoolClient',
                   description='The Cognito User Pool Client',
                   value=user_pool_client.user_pool_client_id)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # Retrieves the ACM certificate arn from an SSM parameter certificate_arn = aws_ssm.StringParameter.value_from_lookup(self, 'cognito_certificate_arn') # Retrieves the domain name from an SSM parameter domain_name = aws_ssm.StringParameter.value_from_lookup(self, 'kubeflow-cognito-domain-name') # Creates the cognito user pool user_pool = aws_cognito.UserPool(self, 'UserPool', user_pool_name = 'mlplatform-user-pool', mfa = aws_cognito.Mfa.OFF, sign_in_aliases = aws_cognito.SignInAliases( username=True, email=True)) # Creates the cognito user pool client user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient', user_pool = user_pool, generate_secret = True, o_auth = aws_cognito.OAuthSettings( flows = aws_cognito.OAuthFlows( authorization_code_grant = True ), scopes = [ aws_cognito.OAuthScope.EMAIL, aws_cognito.OAuthScope.OPENID, aws_cognito.OAuthScope.PROFILE, aws_cognito.OAuthScope.COGNITO_ADMIN ], callback_urls = ['https://kubeflow.' + domain_name + '/oauth2/idpresponse'] ), user_pool_client_name = 'mlplatform-user-pool-client') # Initialises the ACM certificate cognito_custom_domain_certificate = aws_certificatemanager.Certificate.from_certificate_arn(self, 'DomainCertificate', certificate_arn) # Creates the cognito user pool domain user_pool_domain = aws_cognito.UserPoolDomain(self, 'UserPoolDomain', user_pool = user_pool, custom_domain = aws_cognito.CustomDomainOptions( certificate = cognito_custom_domain_certificate, domain_name = 'auth.' + domain_name ) )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Create a demo Cognito user pool with a Cognito-hosted domain.

    Exposes the pool and domain as ``self.user_pool`` /
    ``self.user_pool_domain`` for sibling stacks.

    NOTE(review): this requires ``kwargs['env']`` to be present and carry
    ``account``/``region`` -- it raises KeyError otherwise; confirm callers
    always pass ``env``.
    """
    super().__init__(scope, construct_id, **kwargs)
    # Account/region feed the globally-unique hosted-UI domain prefix below.
    account = kwargs['env'].account
    region = kwargs['env'].region
    user_pool = cognito.UserPool(
        self, "DemoUserPool",
        user_pool_name="demo-user-pool",
        self_sign_up_enabled=True,
        enable_sms_role=False,
        # 12+ characters with at least one digit and one lowercase letter.
        password_policy={
            "min_length": 12,
            "require_digits": True,
            "require_lowercase": True,
            "require_symbols": False,
            "require_uppercase": False
        },
        standard_attributes={
            "address": {
                "required": False,
                "mutable": True
            },
            "nickname": {
                "required": False,
                "mutable": True
            }
        },
        custom_attributes={
            "favorite_fruit": cognito.StringAttribute(min_len=1,
                                                      max_len=30,
                                                      mutable=True)
        },
        # Sign in with either username or e-mail; e-mail is auto-verified.
        sign_in_aliases={
            "username": True,
            "email": True
        },
        auto_verify={"email": True})
    # Hosted-UI domain; the prefix embeds account and region because Cognito
    # domain prefixes must be globally unique.
    user_pool_domain = cognito.UserPoolDomain(
        self, "DemoUserPoolDomain",
        user_pool=user_pool,
        cognito_domain={
            "domain_prefix": "demo-user-pool-{}-{}".format(account, region)
        })
    self.user_pool = user_pool
    self.user_pool_domain = user_pool_domain
def _create_user_pool(self) -> cognito.UserPool:
    """Create the Orbit Workbench user pool (admin-invite only).

    Self sign-up is disabled: users are created by an administrator and
    receive the invitation e-mail configured below. The pool gets a
    Cognito-hosted domain prefixed with account id + environment name and
    is deleted with the stack (RemovalPolicy.DESTROY).

    :return: the configured ``cognito.UserPool``.
    """
    pool = cognito.UserPool(
        scope=self,
        id="orbit-user-pool",
        account_recovery=cognito.AccountRecovery.EMAIL_ONLY,
        auto_verify=cognito.AutoVerifiedAttrs(email=True, phone=False),
        custom_attributes=None,
        email_settings=None,
        lambda_triggers=None,
        mfa=cognito.Mfa.OFF,
        mfa_second_factor=None,
        # Strict password policy; temporary (invite) passwords stay valid
        # for 5 days.
        password_policy=cognito.PasswordPolicy(
            min_length=8,
            require_digits=True,
            require_lowercase=True,
            require_symbols=True,
            require_uppercase=True,
            temp_password_validity=Duration.days(5),
        ),
        self_sign_up_enabled=False,
        sign_in_aliases=cognito.SignInAliases(email=True,
                                              phone=False,
                                              preferred_username=False,
                                              username=True),
        sign_in_case_sensitive=True,
        sms_role=None,
        sms_role_external_id=None,
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(required=True, mutable=True)),
        # {username} and {####} are Cognito template placeholders filled in
        # when the invitation e-mail is sent.
        user_invitation=cognito.UserInvitationConfig(
            email_subject="Invite to join Orbit Workbench!",
            email_body=
            "Hello, you have been invited to join Orbit Workbench!<br/><br/>"
            "Username: {username}<br/>"
            "Temporary password: {####}<br/><br/>"
            "Regards",
        ),
        user_pool_name=f"orbit-{self.env_name}-user-pool",
    )
    pool.apply_removal_policy(policy=core.RemovalPolicy.DESTROY)
    # Hosted domain prefix must be globally unique, hence account + env.
    pool.add_domain(
        id="orbit-user-pool-domain",
        cognito_domain=cognito.CognitoDomainOptions(
            domain_prefix=f"orbit-{self.context.account_id}-{self.env_name}"
        ),
    )
    return pool
def _create_userpool(self):
    """Create the Moshan user pool, its OAuth app client and custom domain.

    Sign-up is admin-only (``self_sign_up_enabled=False``); invited users
    get the e-mail configured below. The client serves the two web
    frontends via the authorization-code grant, and the pool is exposed on
    a custom domain backed by an existing ACM certificate
    (``self.cert_arn`` / ``self.domain_name``).
    """
    user_pool = cognito.UserPool(
        self, "movio",
        account_recovery=cognito.AccountRecovery.EMAIL_ONLY,
        auto_verify=cognito.AutoVerifiedAttrs(email=True, phone=False),
        # MFA is OFF; the second-factor setting below is inert while OFF.
        mfa=cognito.Mfa.OFF,
        mfa_second_factor=cognito.MfaSecondFactor(otp=True, sms=False),
        self_sign_up_enabled=False,
        sign_in_aliases=cognito.SignInAliases(email=True, username=True),
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(mutable=False, required=True),
        ),
        # {username} and {####} are Cognito template placeholders.
        user_invitation=cognito.UserInvitationConfig(
            email_subject="Moshan email verification",
            email_body=
            "Thanks for signing up to moshan! Your username is {username} and temporary password is {####}\nYou can now login at https://moshan.tv",
        ),
        # Link-style verification: user clicks the {##Verify Email##} link.
        user_verification=cognito.UserVerificationConfig(
            email_subject="Moshan email verification",
            email_body=
            "Thanks for signing up to moshan! Verify your account by clicking on {##Verify Email##}",
            email_style=cognito.VerificationEmailStyle.LINK),
    )
    # Authorization-code-grant client for the prod and beta frontends.
    user_pool.add_client(
        "moshan",
        auth_flows=cognito.AuthFlow(refresh_token=True),
        o_auth=cognito.OAuthSettings(
            flows=cognito.OAuthFlows(authorization_code_grant=True),
            callback_urls=[
                "https://moshan.tv/callback.html",
                "https://beta.moshan.tv/callback.html"
            ],
            scopes=[
                cognito.OAuthScope.EMAIL, cognito.OAuthScope.OPENID,
                cognito.OAuthScope.PROFILE
            ]),
        # Return generic auth errors so account existence cannot be probed.
        prevent_user_existence_errors=True,
    )
    # Custom auth domain backed by the pre-provisioned ACM certificate.
    cert = Certificate.from_certificate_arn(self, "domainCert", self.cert_arn)
    user_pool.add_domain("CognitoDomain",
                         custom_domain=cognito.CustomDomainOptions(
                             domain_name=self.domain_name,
                             certificate=cert))
def _create_user_pool(self) -> aws_cognito.UserPool:
    """Create a self-sign-up user pool plus a secret-less web client.

    Users sign in with their e-mail address and verify it via a link-style
    verification e-mail. The web client is created as a side effect; only
    the pool is returned.
    """
    user_pool = aws_cognito.UserPool(
        scope=self,
        id="UserPoolX",
        # auto_verify=aws_cognito.AutoVerifiedAttrs(email=True),
        self_sign_up_enabled=True,
        # NOTE(review): RequiredAttributes / SignInAliases / etc. are used
        # unqualified -- presumably imported directly from aws_cognito at
        # the top of the file; confirm against the imports.
        required_attributes=RequiredAttributes(email=True),
        sign_in_aliases=SignInAliases(email=True),
        user_verification=UserVerificationConfig(
            email_style=VerificationEmailStyle.LINK))
    # Browser clients cannot keep a secret, hence generate_secret=False.
    aws_cognito.UserPoolClient(scope=self,
                               user_pool=user_pool,
                               id="AuthClientWeb",
                               generate_secret=False)
    return user_pool
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create the ServU user pool and its web client, exporting both IDs.

    Sign-in is by e-mail (case-insensitive); e-mail, full name, address
    and phone number are all required attributes. Pool and client IDs are
    exported for cross-stack use.
    """
    super().__init__(scope, id, **kwargs)
    servu_userpool = aws_cognito.UserPool(
        self, 'servu-userpool',
        user_pool_name='ServU Users',
        sign_in_aliases=aws_cognito.SignInAliases(email=True),
        standard_attributes=aws_cognito.StandardAttributes(
            email=aws_cognito.StandardAttribute(required=True,
                                                mutable=True),
            fullname=aws_cognito.StandardAttribute(required=True,
                                                   mutable=True),
            address=aws_cognito.StandardAttribute(required=True,
                                                  mutable=True),
            phone_number=aws_cognito.StandardAttribute(required=True,
                                                       mutable=True)),
        # Strong password policy; temporary passwords expire after one day.
        password_policy=aws_cognito.PasswordPolicy(
            min_length=8,
            require_digits=True,
            require_lowercase=True,
            require_symbols=True,
            require_uppercase=True,
            temp_password_validity=core.Duration.days(1)),
        sign_in_case_sensitive=False)
    # Web client: custom auth, SRP and refresh-token flows only (no plain
    # password flow).
    servu_userpool_web_client = aws_cognito.UserPoolClient(
        self, 'servu-userpool-web-client',
        user_pool=servu_userpool,
        auth_flows=aws_cognito.AuthFlow(custom=True,
                                        refresh_token=True,
                                        user_srp=True),
        user_pool_client_name='ServU Web Client')
    # TODO: Manually configure the domain and callback URLs. Look at CFN for the Pool ID and Client ID
    core.CfnOutput(self, 'servu-userpool-id',
                   value=servu_userpool.user_pool_id,
                   export_name='servu-userpool-id')
    core.CfnOutput(self, 'servu-userpool-client-id',
                   value=servu_userpool_web_client.user_pool_client_id,
                   export_name='servu-userpool-client-id')
def add_cognito(self):
    """Provision Cognito auth for the app: user pool, app client, identity
    pool, and the IAM roles assumed by authenticated/unauthenticated users.

    :return: tuple ``(user_pool, identity_pool, user_pool_client)``.
    """
    # Relaxed password policy: only Cognito's default minimum length applies.
    password_policy = _cognito.PasswordPolicy(
        require_lowercase=False,
        require_digits=False,
        require_symbols=False,
        require_uppercase=False,
    )
    user_pool = _cognito.UserPool(
        self, 'UserPool',
        password_policy=password_policy,
        user_pool_name='UserPool',
        self_sign_up_enabled=True,
        # {username} and {####} are Cognito template placeholders.
        user_verification={
            "email_subject": "Verify your email for our awesome app!",
            "email_body":
            "Hello {username}, Thanks for signing up to our awesome app! Your verification code is {####}",
            "email_style": _cognito.VerificationEmailStyle.CODE,
        },
        user_invitation={
            "email_subject": "Invite to join our awesome app!",
            "email_body":
            "Hello {username}, you have been invited to join our awesome app! Your temporary password is {####}",
        },
        sign_in_aliases={"email": True},
        auto_verify={"email": True},
    )
    # All four auth flows enabled for the app client.
    user_pool_client = user_pool.add_client(
        "AppClient",
        auth_flows={
            "user_password": True,
            "user_srp": True,
            "refresh_token": True,
            "admin_user_password": True
        })
    # Federate the user pool into an identity pool (no guest identities).
    idp = _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
        client_id=user_pool_client.user_pool_client_id,
        provider_name=user_pool.user_pool_provider_name)
    identity_pool = _cognito.CfnIdentityPool(
        self, "IdPool",
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[idp])
    # Trust policy for users who signed in through the user pool.
    authenticated_principal = _iam.FederatedPrincipal(
        'cognito-identity.amazonaws.com',
        {
            "StringEquals": {
                "cognito-identity.amazonaws.com:aud": identity_pool.ref
            },
            "ForAnyValue:StringLike": {
                "cognito-identity.amazonaws.com:amr": "authenticated"
            },
        },
        "sts:AssumeRoleWithWebIdentity")
    # FIX: this principal previously matched amr "authenticated" (copy-paste
    # of the block above), so guest identities could never assume the
    # unauthenticated role. Guests carry amr "unauthenticated".
    unauthenticated_principal = _iam.FederatedPrincipal(
        'cognito-identity.amazonaws.com',
        {
            "StringEquals": {
                "cognito-identity.amazonaws.com:aud": identity_pool.ref
            },
            "ForAnyValue:StringLike": {
                "cognito-identity.amazonaws.com:amr": "unauthenticated"
            },
        },
        "sts:AssumeRoleWithWebIdentity")
    authenticated_role = _iam.Role(self,
                                   "CognitoDefaultAuthenticatedRole",
                                   assumed_by=authenticated_principal)
    unauthenticated_role = _iam.Role(self,
                                     "CognitoDefaultUnAuthenticatedRole",
                                     assumed_by=unauthenticated_principal)
    # Authenticated users additionally get cognito-identity:* access.
    authenticated_policy = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=[
            "mobileanalytics:PutEvents", "cognito-sync:*",
            "cognito-identity:*"
        ],
        resources=["*"])
    unauthenticated_policy = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=[
            "mobileanalytics:PutEvents",
            "cognito-sync:*",
        ],
        resources=["*"])
    authenticated_role.add_to_policy(authenticated_policy)
    unauthenticated_role.add_to_policy(unauthenticated_policy)
    # Attach both roles to the identity pool.
    _cognito.CfnIdentityPoolRoleAttachment(
        self, "DefaultValidRoleAttachment",
        identity_pool_id=identity_pool.ref,
        roles={
            "authenticated": authenticated_role.role_arn,
            "unauthenticated": unauthenticated_role.role_arn
        })
    return user_pool, identity_pool, user_pool_client
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """IoT Analytics demo stack for power-transformer telemetry.

    Builds, in order: a DynamoDB lookup table + enrichment Lambda, an IoT
    Analytics channel/pipeline/datastore/dataset stored in two S3 buckets,
    a Cognito-backed login for a Kibana/Elasticsearch domain, a Lambda that
    indexes the dataset output into Elasticsearch, and custom resources
    that seed DynamoDB, the ES index and the Kibana dashboards.

    NOTE(review): CHANNEL/DATASTORE/PIPELINE/DATASET/DOMAIN are presumably
    aliases for the aws_iotanalytics Cfn* constructs and the
    aws_elasticsearch CfnDomain, imported at the top of the file -- confirm.

    :param props: dict-like; must contain 'projectName', used to derive
        bucket, pipeline and domain names. TODO confirm full schema.
    """
    super().__init__(scope, id, **kwargs)
    # Lookup table read by the enrichment Lambda, keyed by transformer name.
    power_transformers = aws_dynamodb.Table(
        self, "PowerTransformers",
        table_name="PowerTransformers",
        partition_key=aws_dynamodb.Attribute(
            name="name", type=aws_dynamodb.AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY)
    # Enrichment Lambda invoked by the IoT Analytics pipeline below.
    function = _lambda.Function(
        self, "power_transformers_data_enrichment",
        function_name="power_transformers_data_enrichment",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="lambda_function.handler",
        code=_lambda.Code.asset("./lambda/data-enrichment"))
    function.add_environment('TABLE_NAME', power_transformers.table_name)
    function.add_to_role_policy(
        iam.PolicyStatement(actions=['dynamodb:GetItem'],
                            resources=[f"{power_transformers.table_arn}"],
                            effect=iam.Effect.ALLOW))
    # Allow IoT Analytics to invoke the enrichment Lambda.
    function.add_permission(
        principal=iam.ServicePrincipal('iotanalytics.amazonaws.com'),
        action='lambda:InvokeFunction',
        id='pt-iot-analytics')
    # Raw telemetry bucket (channel + datastore storage).
    bucket = s3.Bucket(
        self, 'PowerTransformersTelemetryBucket',
        bucket_name=f"{props['projectName'].lower()}-{core.Aws.ACCOUNT_ID}",
        removal_policy=core.RemovalPolicy.DESTROY)
    # Dataset output bucket, consumed by the indexing Lambda further down.
    output_bucket = s3.Bucket(
        self, 'PowerTransformersProcessedDataBucket',
        bucket_name=
        f"{props['projectName'].lower()}-output-{core.Aws.ACCOUNT_ID}",
        removal_policy=core.RemovalPolicy.DESTROY)
    # Apply least privilege
    s3_role = iam.Role(
        self, "IotAnalyticsS3Role",
        assumed_by=iam.ServicePrincipal("iotanalytics.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3FullAccess')
        ])
    # s3_role.add_to_policy(iam.PolicyStatement(actions=["s3:PutObject", "s3:DeleteObject", "s3:GetBucketLocation"],
    #     resources=[f"{bucket.bucket_arn}", f"{bucket.bucket_arn}/*"], effect=iam.Effect.ALLOW))
    # Apply least privilege
    s3_output_role = iam.Role(
        self, "IotAnalyticsS3OutputRole",
        assumed_by=iam.ServicePrincipal("iotanalytics.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3FullAccess')
        ],
    )
    # s3_output_role.add_to_policy(iam.PolicyStatement(actions=["s3:PutObject", "s3:DeleteObject", "s3:GetBucketLocation"],
    #     resources=[f"{output_bucket.bucket_arn}", f"{output_bucket.bucket_arn}/*"], effect=iam.Effect.ALLOW))
    # IoT Analytics names must use underscores, not hyphens.
    project_name = props['projectName'].lower().replace('-', '_')
    channel_name = f"{project_name}_channel"
    datastore_name = f"{project_name}_datastore"
    # Channel: raw ingest stored under raw/ in the telemetry bucket.
    channel_s3 = CHANNEL.CustomerManagedS3Property(
        bucket=bucket.bucket_name,
        key_prefix='raw/',
        role_arn=s3_role.role_arn)
    channel_storage = CHANNEL.ChannelStorageProperty(
        customer_managed_s3=channel_s3)
    CHANNEL(self, 'iot_channel',
            channel_name=channel_name,
            channel_storage=channel_storage)
    # Datastore: pipeline output stored under processed/ in the same bucket.
    datastore_s3 = DATASTORE.CustomerManagedS3Property(
        bucket=bucket.bucket_name,
        key_prefix='processed/',
        role_arn=s3_role.role_arn)
    datastore_storage = DATASTORE.DatastoreStorageProperty(
        customer_managed_s3=datastore_s3)
    datastore = DATASTORE(self, 'iot_datastore',
                          datastore_name=datastore_name,
                          datastore_storage=datastore_storage)
    # Pipeline: channel -> enrichment Lambda (batches of 10) -> datastore.
    channel_activity = PIPELINE.ChannelProperty(name='ChannelActivity',
                                                channel_name=channel_name,
                                                next='LambdaActivity')
    lambda_activity = PIPELINE.LambdaProperty(
        name='LambdaActivity',
        lambda_name='power_transformers_data_enrichment',
        next='DatastoreActivity',
        batch_size=10)
    datastore_activity = PIPELINE.DatastoreProperty(
        name='DatastoreActivity', datastore_name=datastore_name)
    pipeline_activities = PIPELINE.ActivityProperty(
        channel=channel_activity,
        lambda_=lambda_activity,
        datastore=datastore_activity)
    pipeline = PIPELINE(self, 'iot_pipeline',
                        pipeline_name=f"{project_name}_pipeline",
                        pipeline_activities=[pipeline_activities])
    pipeline.add_depends_on(datastore)
    # Dataset: full datastore scan every 5 minutes, delivered to the output
    # bucket as a versioned CSV.
    query_action = DATASET.QueryActionProperty(
        sql_query=f"SELECT * FROM {datastore_name}")
    action = DATASET.ActionProperty(query_action=query_action,
                                    action_name='sqlAction')
    schedule_expression = DATASET.ScheduleProperty(
        schedule_expression='cron(1/5 * * * ? *)')
    trigger_schedule = DATASET.TriggerProperty(
        schedule=schedule_expression)
    dataset_s3_destination = DATASET.S3DestinationConfigurationProperty(
        bucket=output_bucket.bucket_name,
        key=
        'dataset/Version/!{iotanalytics:scheduleTime}_!{iotanalytics:versionId}.csv',
        role_arn=s3_output_role.role_arn)
    dataset_destination = DATASET.DatasetContentDeliveryRuleDestinationProperty(
        s3_destination_configuration=dataset_s3_destination)
    content_delivery_rules = DATASET.DatasetContentDeliveryRuleProperty(
        destination=dataset_destination)
    dataset = DATASET(self, 'iot_dataset',
                      dataset_name=f"{project_name}_dataset",
                      actions=[action],
                      triggers=[trigger_schedule],
                      content_delivery_rules=[content_delivery_rules])
    dataset.add_depends_on(datastore)
    # Cognito user pool used for the Kibana login (no self sign-up).
    user_pool = aws_cognito.UserPool(
        self, 'kibanaUserPool',
        self_sign_up_enabled=False,
        sign_in_aliases=aws_cognito.SignInAliases(username=True, email=True))
    # Random suffix keeps the Cognito domain prefix globally unique.
    aws_cognito.CfnUserPoolDomain(
        self, 'userPoolDomain',
        user_pool_id=user_pool.user_pool_id,
        domain=
        f"{props['projectName'].lower()}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
    )
    user_pool_client = aws_cognito.UserPoolClient(self, 'kibanaClientId',
                                                  user_pool=user_pool,
                                                  generate_secret=True)
    identity_provider = aws_cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
        client_id=user_pool_client.user_pool_client_id,
        provider_name=user_pool.user_pool_provider_name)
    identity_pool = aws_cognito.CfnIdentityPool(
        self, 'identityPool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[identity_provider])
    # Apply least privilege
    # Role assumed by authenticated Kibana users (amr "authenticated").
    cognito_authenticated_role = iam.Role(
        self, "CognitoAuthRole",
        assumed_by=iam.FederatedPrincipal(
            "cognito-identity.amazonaws.com",
            assume_role_action='sts:AssumeRoleWithWebIdentity',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': identity_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            }),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonESFullAccess')
        ])
    aws_cognito.CfnIdentityPoolRoleAttachment(
        self, 'identityPoolRoleAttachment',
        identity_pool_id=identity_pool.ref,
        roles={'authenticated': cognito_authenticated_role.role_arn})
    # ES <-> Cognito wiring; assumes the service-linked role
    # CognitoAccessForAmazonES already exists in the account -- confirm.
    cognito_options = DOMAIN.CognitoOptionsProperty(
        enabled=True,
        user_pool_id=user_pool.user_pool_id,
        identity_pool_id=identity_pool.ref,
        role_arn=
        f"arn:aws:iam::{core.Aws.ACCOUNT_ID}:role/service-role/CognitoAccessForAmazonES"
    )
    ebs_options = DOMAIN.EBSOptionsProperty(ebs_enabled=True,
                                            volume_size=10,
                                            volume_type='gp2')
    elasticsearch_cluster_config = DOMAIN.ElasticsearchClusterConfigProperty(
        instance_count=1, instance_type='r5.large.elasticsearch')
    encryption_at_rest_options = DOMAIN.EncryptionAtRestOptionsProperty(
        enabled=True)
    node_to_node_encryption_options = DOMAIN.NodeToNodeEncryptionOptionsProperty(
        enabled=True)
    snapshot_options = DOMAIN.SnapshotOptionsProperty(
        automated_snapshot_start_hour=0)
    es_domain_arn = f"arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:domain/{props['projectName'].lower()}/*"
    # Domain access policy: full es:* for the authenticated Cognito role.
    es_policy_statement = iam.PolicyStatement(actions=['es:*'],
                                              resources=[es_domain_arn])
    es_policy_statement.add_arn_principal(
        cognito_authenticated_role.role_arn)
    policy_document = iam.PolicyDocument()
    policy_document.add_statements(es_policy_statement)
    domain = DOMAIN(
        self, 'elasticsearch',
        domain_name=f"{props['projectName'].lower()}",
        cognito_options=cognito_options,
        ebs_options=ebs_options,
        elasticsearch_cluster_config=elasticsearch_cluster_config,
        encryption_at_rest_options=encryption_at_rest_options,
        node_to_node_encryption_options=node_to_node_encryption_options,
        snapshot_options=snapshot_options,
        elasticsearch_version='6.8',
        access_policies=policy_document)
    # NOTE(review): `function` is rebound here -- the enrichment Lambda above
    # is no longer referenced by this name past this point.
    function = _lambda.Function(
        self, "load_data_from_s3_to_es",
        function_name="load_data_from_s3_to_es",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="lambda_function.handler",
        code=_lambda.Code.asset("./lambda/load-data-from-s3-to-es.zip"))
    function.add_environment('ES_HOST', domain.attr_domain_endpoint)
    function.add_environment('ES_REGION', f"{core.Aws.REGION}")
    function.add_to_role_policy(
        iam.PolicyStatement(actions=['es:ESHttpPost'],
                            resources=[es_domain_arn],
                            effect=iam.Effect.ALLOW))
    function.add_to_role_policy(
        iam.PolicyStatement(actions=['s3:GetObject'],
                            resources=[f"{output_bucket.bucket_arn}/*"],
                            effect=iam.Effect.ALLOW))
    # Index every new dataset CSV into Elasticsearch as it lands in S3.
    notification = aws_s3_notifications.LambdaDestination(function)
    output_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification)
    # Custom resources that seed initial data; explicit dependencies keep
    # them ordered after the resources they populate.
    load_ddb_custom_resource = LoadDDBDataCustomResource(
        self, "LoadDDBData",
        table_name=power_transformers.table_name,
        table_arn=power_transformers.table_arn)
    load_ddb_custom_resource.node.add_dependency(power_transformers)
    load_es_index_custom_resource = LoadESIndexCustomResource(
        self, "LoadESIndex",
        es_host=domain.attr_domain_endpoint,
        es_region=f"{core.Aws.REGION}",
        es_domain_arn=es_domain_arn)
    load_es_index_custom_resource.node.add_dependency(domain)
    load_kibana_dashboards_custom_resource = LoadKibanaDashboardsCustomResource(
        self, "LoadKibanaDashboards",
        es_host=domain.attr_domain_endpoint,
        es_region=f"{core.Aws.REGION}",
        es_domain_arn=es_domain_arn)
    load_kibana_dashboards_custom_resource.node.add_dependency(
        load_es_index_custom_resource)
def __init__(self, scope: core.Construct, id: str, region, domain, **kwargs) -> None:
    """On-demand Minecraft server stack.

    Runs the server as a Fargate task backed by EFS, fronted by an API
    Gateway (status on /, start on /start) at minecrafter.<domain>, with a
    Cognito user pool for sending (Polish-language) invitations.

    :param region: AWS region name, passed through to the starter Lambda.
    :param domain: Route53 zone name; minecraft.<domain> and
        minecrafter.<domain> records are created under it.
    """
    super().__init__(scope, id, **kwargs)
    # VPC , we need one for ECS cluster ( sadly )
    vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)
    cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)
    # Route53 & SSL Certificate. The A record starts as a placeholder IP;
    # presumably the starter Lambda rewrites it with the running task's IP
    # (it is granted route53:ChangeResourceRecordSets below) -- confirm.
    zone = dns.HostedZone(self, "dns", zone_name=domain)
    dns.ARecord(self, 'MinecraftRecord',
                zone=zone,
                record_name='minecraft',
                target=dns.RecordTarget(values=['1.2.3.4']))
    cert = acm.Certificate(
        self, 'cert',
        domain_name=f'*.{domain}',
        validation=acm.CertificateValidation.from_dns(zone))
    # ECS ( Cluster, EFS, Task Def)
    fs = efs.FileSystem(self, 'EFS',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)
    task_definition = ecs.FargateTaskDefinition(self, 'TaskDef',
                                                memory_limit_mib=4096,
                                                cpu=1024)
    container = task_definition.add_container(
        'MinecraftDocker',
        image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
        logging=ecs.AwsLogDriver(stream_prefix='Minecraf'),
        cpu=1024,
        memory_limit_mib=4096)
    container.add_mount_points(
        ecs.MountPoint(container_path='/minecraft',
                       source_volume='efs',
                       read_only=False))
    # The high-level construct lacks EFS volume support here, so patch the
    # raw CloudFormation task definition directly.
    cfn_task = container.task_definition.node.default_child
    cfn_task.add_property_override("Volumes", [{
        "EFSVolumeConfiguration": {
            "FilesystemId": fs.file_system_id
        },
        "Name": "efs"
    }])
    container.add_port_mappings(ecs.PortMapping(container_port=25565))
    # World-open ingress for the Minecraft (25565) and RCON (25575) ports.
    sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
    sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                        connection=ec2.Port.tcp(25565),
                        description='Minecraft Access')
    sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                        connection=ec2.Port.tcp(25575),
                        description='RCONN Access')
    fs.connections.allow_default_port_from(sg)
    # Comma-separated subnet list consumed by the starter Lambda.
    subnets = ",".join(vpc.select_subnets().subnet_ids)
    # Cognito ( For ApiGW Authentication)
    userpool = cognito.UserPool(
        self, 'UserPool',
        user_invitation=cognito.UserInvitationConfig(
            email_body=
            """No cześć {username}, zostałeś zaproszony do naszego Minecraft!
Twoje tymczasowe hasło to {####}
""",
            email_subject="Zaproszenie do minecrafta"))
    # APIGW (Gateway, Lambdas, S3 Static content)
    # Lambda Starter: launches the Fargate task on demand.
    starter = _lambda.Function(self, 'Starter',
                               runtime=_lambda.Runtime.PYTHON_3_8,
                               handler='index.lambda_handler',
                               code=_lambda.Code.asset('lambda/starter'),
                               timeout=core.Duration.seconds(300),
                               environment={
                                   'cluster': cluster.cluster_name,
                                   'subnets': subnets,
                                   'security_groups': sg.security_group_id,
                                   'task_definition':
                                   task_definition.task_definition_arn,
                                   'region': region,
                                   'zone_id': zone.hosted_zone_id,
                                   'domain': domain
                               })
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=["*"],
                            actions=[
                                "ecs:ListTasks", "ecs:DescribeTasks",
                                "ec2:DescribeNetworkInterfaces"
                            ]))
    starter.add_to_role_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[task_definition.task_definition_arn],
            actions=["ecs:RunTask", "ecs:DescribeTasks"]))
    # RunTask requires passing both the task role and the execution role.
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                task_definition.task_role.role_arn,
                                task_definition.execution_role.role_arn
                            ],
                            actions=["iam:PassRole"]))
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[zone.hosted_zone_arn],
                            actions=["route53:ChangeResourceRecordSets"]))
    # S3 static webpage
    bucket = s3.Bucket(self, "S3WWW",
                       public_read_access=True,
                       removal_policy=core.RemovalPolicy.DESTROY,
                       website_index_document="index.html")
    s3d.BucketDeployment(self, "S3Deploy",
                         destination_bucket=bucket,
                         sources=[s3d.Source.asset("static_page")])
    # Status Lambda serves the API root.
    status = _lambda.Function(self, 'Status',
                              runtime=_lambda.Runtime.PYTHON_3_8,
                              handler='index.lambda_handler',
                              code=_lambda.Code.asset('lambda/status'),
                              environment={
                                  'url': f"https://minecrafter.{domain}",
                                  'domain': domain
                              })
    # ApiGW: status handler on /, starter wired to /start below.
    apigw = api.LambdaRestApi(self, 'ApiGW',
                              handler=status,
                              proxy=False,
                              domain_name={
                                  "domain_name": f'minecrafter.{domain}',
                                  "certificate": cert
                              },
                              default_cors_preflight_options={
                                  "allow_origins": api.Cors.ALL_ORIGINS,
                                  "allow_methods": api.Cors.ALL_METHODS
                              })
    start = apigw.root.add_resource('start')
    start.add_method('ANY', integration=api.LambdaIntegration(starter))
    apigw.root.add_method('ANY')
    # Point minecrafter.<domain> at the API Gateway via an alias record.
    dns.ARecord(self, 'PointDNSToApiGW',
                zone=zone,
                target=dns.RecordTarget.from_alias(
                    targets.ApiGateway(apigw)),
                record_name=f"minecrafter.{domain}")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Ship S3 access logs to two Elasticsearch destinations.

    Pipeline: access-log bucket -> SNS topic -> two SQS queues (each with
    a DLQ, max 10 receives) -> two Lambdas: one pushes to Elastic Cloud,
    the other into an Amazon ES domain through a Kinesis Firehose delivery
    stream. A Cognito user pool is created for the ES/Kibana login.
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    sqs_to_elastic_cloud = aws_lambda.Function(
        self, 'sqs_to_elastic_cloud',
        handler='sqs_to_elastic_cloud.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=logs.RetentionDays.ONE_DAY)

    sqs_to_elasticsearch_service = aws_lambda.Function(
        self, 'sqs_to_elasticsearch_service',
        handler='sqs_to_elasticsearch_service.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    access_log_bucket = aws_s3.Bucket(self, "access_log_bucket")
    kinesis_log_bucket = aws_s3.Bucket(self, "kinesis_log_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    # Read-only S3 plus full Firehose access, shared by both Lambdas.
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*"],
        resources=["*"])
    sqs_to_elastic_cloud.add_to_role_policy(
        lambda_supplemental_policy_statement)
    sqs_to_elasticsearch_service.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPIC + S3 BUCKET NOTIFICATION (fan-out point)
    ###########################################################################
    access_log_topic = aws_sns.Topic(self, "access_log_topic")
    access_log_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.SnsDestination(access_log_topic))

    ###########################################################################
    # AWS SQS QUEUES
    # Visibility timeout (301s) is kept just above the Lambda timeout (300s)
    # so a message cannot reappear while its handler is still running.
    ###########################################################################
    sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elasticsearch_service_queue_dlq")
    sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10,
        queue=sqs_to_elasticsearch_service_queue_iqueue)
    sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
        self, "sqs_to_elasticsearch_service_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

    sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elastic_cloud_queue_dlq")
    sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10,
        queue=sqs_to_elastic_cloud_queue_iqueue)
    sqs_to_elastic_cloud_queue = aws_sqs.Queue(
        self, "sqs_to_elastic_cloud_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

    ###########################################################################
    # AWS SNS TOPIC SUBSCRIPTIONS + LAMBDA SQS EVENT SOURCES
    ###########################################################################
    access_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
    access_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(
            sqs_to_elasticsearch_service_queue))

    sqs_to_elastic_cloud.add_event_source(
        SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
    sqs_to_elasticsearch_service.add_event_source(
        SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    # Placeholder principal for a (currently disabled) domain access policy.
    this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")

    s3_to_elasticsearch_access_logs_domain = aws_elasticsearch.Domain(
        self, "s3-to-elasticsearch-access-logs-domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL (sign-in by username or e-mail)
    ###########################################################################
    s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
        self, "s3-to-elasticsearch-access-logs-pool",
        sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                  username=True))

    ###########################################################################
    # AMAZON KINESIS FIREHOSE STREAM -> Elasticsearch domain
    ###########################################################################
    kinesis_firehose_stream_role = aws_iam.Role(
        self, "BaseVPCIAMLogRole",
        assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com'),
        inline_policies={
            "AllowLogAccess":
            aws_iam.PolicyDocument(
                assign_sids=False,
                statements=[
                    aws_iam.PolicyStatement(
                        # NOTE: '*' already grants everything; the named
                        # actions are kept to document the intended scope.
                        # Fixed typo: was 'logs:DescribeLogsStreams'.
                        actions=[
                            '*', 'es:*', 'logs:PutLogEvents',
                            'logs:DescribeLogGroups',
                            'logs:DescribeLogStreams'
                        ],
                        effect=aws_iam.Effect.ALLOW,
                        resources=['*'])
                ])
        })

    retry_options = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchRetryOptionsProperty(
        duration_in_seconds=300)
    # Firehose always needs an S3 destination for backup/failed records.
    s3_configuration = aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
        bucket_arn=kinesis_log_bucket.bucket_arn,
        role_arn=kinesis_firehose_stream_role.role_arn)
    es_destination_configuration = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchDestinationConfigurationProperty(
        domain_arn=s3_to_elasticsearch_access_logs_domain.domain_arn,
        index_name="s3-to-elasticsearch-accesslogs",
        index_rotation_period="OneDay",
        retry_options=retry_options,
        role_arn=kinesis_firehose_stream_role.role_arn,
        s3_configuration=s3_configuration)

    kinesis_firehose_stream = aws_kinesisfirehose.CfnDeliveryStream(
        self, "kinesis_firehose_stream",
        elasticsearch_destination_configuration=
        es_destination_configuration)

    ###########################################################################
    # LAMBDA RUNTIME CONFIGURATION
    ###########################################################################
    sqs_to_elasticsearch_service.add_environment(
        "FIREHOSE_NAME", kinesis_firehose_stream.ref)
    sqs_to_elasticsearch_service.add_environment(
        "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
    sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

    # Elastic Cloud credentials default to "-" placeholders; real values
    # are expected to be set outside this stack.
    sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
    sqs_to_elastic_cloud.add_environment(
        "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
    sqs_to_elastic_cloud.add_environment("DEBUG", "False")
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Photo-gallery stack.

    Provisions: S3 buckets for originals, thumbnails and the static site;
    a DynamoDB table for image labels; a Rekognition labelling Lambda and a
    synchronous service Lambda; Cognito user/identity pools; a Cognito-
    authorized REST API; and an SQS queue fed by S3 object-created events.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Image Bucket
    image_bucket = s3.Bucket(self,
                             IMG_BUCKET_NAME,
                             removal_policy=cdk.RemovalPolicy.DESTROY)
    cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name)
    # Browsers upload/download directly against the bucket, so CORS must
    # allow GET and PUT from any origin.
    image_bucket.add_cors_rule(
        allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
        allowed_origins=["*"],
        allowed_headers=["*"],
        max_age=3000,
    )

    # Thumbnail Bucket
    resized_image_bucket = s3.Bucket(
        self, RESIZED_IMG_BUCKET_NAME,
        removal_policy=cdk.RemovalPolicy.DESTROY)
    cdk.CfnOutput(self,
                  "resizedBucket",
                  value=resized_image_bucket.bucket_name)
    resized_image_bucket.add_cors_rule(
        allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
        allowed_origins=["*"],
        allowed_headers=["*"],
        max_age=3000,
    )

    # S3 Static bucket for website code
    web_bucket = s3.Bucket(
        self,
        WEBSITE_BUCKET_NAME,
        website_index_document="index.html",
        website_error_document="index.html",
        removal_policy=cdk.RemovalPolicy.DESTROY,
        # uncomment this and delete the policy statement below to allow public access to our
        # static website
        # public_read_access=true
    )
    # Instead of full public access, reads are restricted to one source IP.
    web_policy_statement = iam.PolicyStatement(
        actions=["s3:GetObject"],
        resources=[web_bucket.arn_for_objects("*")],
        principals=[iam.AnyPrincipal()],
        conditions={"IpAddress": {
            "aws:SourceIp": ["139.138.203.36"]
        }},
    )
    web_bucket.add_to_resource_policy(web_policy_statement)
    cdk.CfnOutput(self, "bucketURL",
                  value=web_bucket.bucket_website_domain_name)

    # Deploy site contents to S3 Bucket
    s3_dep.BucketDeployment(
        self,
        "DeployWebsite",
        sources=[s3_dep.Source.asset("./public")],
        destination_bucket=web_bucket,
    )

    # DynamoDB to store image labels (partition key = image name/key).
    partition_key = dynamodb.Attribute(name="image",
                                       type=dynamodb.AttributeType.STRING)
    table = dynamodb.Table(
        self,
        "ImageLabels",
        partition_key=partition_key,
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )
    cdk.CfnOutput(self, "ddbTable", value=table.table_name)

    # Lambda layer for Pillow library
    layer = lb.LayerVersion(
        self,
        "pil",
        code=lb.Code.from_asset("reklayer"),
        compatible_runtimes=[lb.Runtime.PYTHON_3_7],
        license="Apache-2.0",
        description=
        "A layer to enable the PIL library in our Rekognition Lambda",
    )

    # Lambda function: labels uploaded images via Rekognition and writes
    # thumbnails into the resized bucket.
    rek_fn = lb.Function(
        self,
        "rekognitionFunction",
        code=lb.Code.from_asset("rekognitionFunction"),
        runtime=lb.Runtime.PYTHON_3_7,
        handler="index.handler",
        timeout=cdk.Duration.seconds(30),
        memory_size=1024,
        layers=[layer],
        environment={
            "TABLE": table.table_name,
            "BUCKET": image_bucket.bucket_name,
            "THUMBBUCKET": resized_image_bucket.bucket_name,
        },
    )
    image_bucket.grant_read(rek_fn)
    resized_image_bucket.grant_write(rek_fn)
    table.grant_write_data(rek_fn)
    # DetectLabels is not resource-scoped, hence the "*" resource.
    rek_fn.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=["rekognition:DetectLabels"],
                            resources=["*"]))

    # Lambda for Synchronous front end
    serviceFn = lb.Function(
        self,
        "serviceFunction",
        code=lb.Code.from_asset("servicelambda"),
        runtime=lb.Runtime.PYTHON_3_7,
        handler="index.handler",
        environment={
            "TABLE": table.table_name,
            "BUCKET": image_bucket.bucket_name,
            "RESIZEDBUCKET": resized_image_bucket.bucket_name,
        },
    )
    image_bucket.grant_write(serviceFn)
    resized_image_bucket.grant_write(serviceFn)
    table.grant_read_write_data(serviceFn)

    # Cognito User Pool Auth
    auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True)
    sign_in_aliases = cognito.SignInAliases(email=True, username=True)
    user_pool = cognito.UserPool(
        self,
        "UserPool",
        self_sign_up_enabled=True,
        auto_verify=auto_verified_attrs,
        sign_in_aliases=sign_in_aliases,
    )
    user_pool_client = cognito.UserPoolClient(self,
                                              "UserPoolClient",
                                              user_pool=user_pool,
                                              generate_secret=False)
    identity_pool = cognito.CfnIdentityPool(
        self,
        "ImageRekognitionIdentityPool",
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[{
            "clientId": user_pool_client.user_pool_client_id,
            "providerName": user_pool.user_pool_provider_name,
        }],
    )

    # API Gateway
    cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS,
                                     allow_methods=apigw.Cors.ALL_METHODS)
    api = apigw.LambdaRestApi(
        self,
        "imageAPI",
        default_cors_preflight_options=cors_options,
        handler=serviceFn,
        proxy=False,
    )
    auth = apigw.CfnAuthorizer(
        self,
        "ApiGatewayAuthorizer",
        name="customer-authorizer",
        identity_source="method.request.header.Authorization",
        provider_arns=[user_pool.user_pool_arn],
        rest_api_id=api.rest_api_id,
        # type=apigw.AuthorizationType.COGNITO,
        type="COGNITO_USER_POOLS",
    )
    # BUG FIX: the Cognito identity federated principal is
    # "cognito-identity.amazonaws.com"; the previous value
    # "cognito-identity.amazon.com" names a non-existent provider, so the
    # role could never be assumed via web identity.
    assumed_by = iam.FederatedPrincipal(
        "cognito-identity.amazonaws.com",
        conditions={
            "StringEquals": {
                "cognito-identity.amazonaws.com:aud": identity_pool.ref
            },
            "ForAnyValue:StringLike": {
                "cognito-identity.amazonaws.com:amr": "authenticated"
            },
        },
        assume_role_action="sts:AssumeRoleWithWebIdentity",
    )
    authenticated_role = iam.Role(
        self,
        "ImageRekognitionAuthenticatedRole",
        assumed_by=assumed_by,
    )
    # IAM policy granting users permission to get and put their pictures
    # (scoped to each user's private/<identity-id>/ prefix).
    policy_statement = iam.PolicyStatement(
        actions=["s3:GetObject", "s3:PutObject"],
        effect=iam.Effect.ALLOW,
        resources=[
            image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/*",
            image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/",
            resized_image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/*",
            resized_image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/",
        ],
    )
    # IAM policy granting users permission to list their pictures
    list_policy_statement = iam.PolicyStatement(
        actions=["s3:ListBucket"],
        effect=iam.Effect.ALLOW,
        resources=[
            image_bucket.bucket_arn, resized_image_bucket.bucket_arn
        ],
        conditions={
            "StringLike": {
                "s3:prefix":
                ["private/${cognito-identity.amazonaws.com:sub}/*"]
            }
        },
    )
    authenticated_role.add_to_policy(policy_statement)
    authenticated_role.add_to_policy(list_policy_statement)

    # Attach role to our Identity Pool
    cognito.CfnIdentityPoolRoleAttachment(
        self,
        "IdentityPoolRoleAttachment",
        identity_pool_id=identity_pool.ref,
        roles={"authenticated": authenticated_role.role_arn},
    )

    # Get some outputs from cognito
    cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id)
    cdk.CfnOutput(self,
                  "AppClientId",
                  value=user_pool_client.user_pool_client_id)
    cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)

    # New Amazon API Gateway with AWS Lambda Integration
    success_response = apigw.IntegrationResponse(
        status_code="200",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": "'*'"
        },
    )
    # selection_pattern matches any Lambda error text (incl. newlines).
    error_response = apigw.IntegrationResponse(
        selection_pattern="(\n|.)+",
        status_code="500",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": "'*'"
        },
    )
    request_template = json.dumps({
        "action": "$util.escapeJavaScript($input.params('action'))",
        "key": "$util.escapeJavaScript($input.params('key'))",
    })
    lambda_integration = apigw.LambdaIntegration(
        serviceFn,
        proxy=False,
        request_parameters={
            "integration.request.querystring.action":
            "method.request.querystring.action",
            "integration.request.querystring.key":
            "method.request.querystring.key",
        },
        request_templates={"application/json": request_template},
        passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES,
        integration_responses=[success_response, error_response],
    )

    imageAPI = api.root.add_resource("images")
    success_resp = apigw.MethodResponse(
        status_code="200",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": True
        },
    )
    error_resp = apigw.MethodResponse(
        status_code="500",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": True
        },
    )

    # GET /images
    get_method = imageAPI.add_method(
        "GET",
        lambda_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        request_parameters={
            "method.request.querystring.action": True,
            "method.request.querystring.key": True,
        },
        method_responses=[success_resp, error_resp],
    )
    # DELETE /images
    delete_method = imageAPI.add_method(
        "DELETE",
        lambda_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        request_parameters={
            "method.request.querystring.action": True,
            "method.request.querystring.key": True,
        },
        method_responses=[success_resp, error_resp],
    )

    # Override the authorizer id because it doesn't work when defininting it as a param
    # in add_method
    get_method_resource = get_method.node.find_child("Resource")
    get_method_resource.add_property_override("AuthorizerId", auth.ref)
    delete_method_resource = delete_method.node.find_child("Resource")
    delete_method_resource.add_property_override("AuthorizerId", auth.ref)

    # Building SQS queue and DeadLetter Queue
    dl_queue = sqs.Queue(
        self,
        "ImageDLQueue",
        queue_name="ImageDLQueue",
    )
    dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2,
                                        queue=dl_queue)
    queue = sqs.Queue(
        self,
        "ImageQueue",
        queue_name="ImageQueue",
        visibility_timeout=cdk.Duration.seconds(30),
        receive_message_wait_time=cdk.Duration.seconds(20),
        dead_letter_queue=dl_queue_opts,
    )

    # S3 Bucket Create Notification to SQS
    # Whenever an image is uploaded add it to the queue
    image_bucket.add_object_created_notification(
        s3n.SqsDestination(queue),
        s3.NotificationKeyFilter(prefix="private/"))
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """Cognito-protected "Hello world" REST API.

    Creates a user pool and app client, a Cognito authorizer, a Lambda
    handler and a LambdaRestApi whose GET /hello method requires a token
    issued by the pool. Pool and client IDs are exported as outputs.
    """
    super().__init__(scope, construct_id, **kwargs)

    # User directory: email sign-in and self sign-up, with an intentionally
    # relaxed password policy (length 8, no character-class requirements).
    pool = aws_cognito.UserPool(
        self,
        'UserPool',
        user_pool_name='UserPoolForApiGateway',
        removal_policy=cdk.RemovalPolicy.DESTROY,
        self_sign_up_enabled=True,
        sign_in_aliases=aws_cognito.SignInAliases(email=True),
        auto_verify=aws_cognito.AutoVerifiedAttrs(email=True),
        password_policy=aws_cognito.PasswordPolicy(
            min_length=8,
            require_lowercase=False,
            require_digits=False,
            require_uppercase=False,
            require_symbols=False),
        account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY)

    # App client with every standard auth flow enabled.
    pool_client = aws_cognito.UserPoolClient(
        self,
        'UserPoolClient',
        user_pool=pool,
        auth_flows=aws_cognito.AuthFlow(
            admin_user_password=True,
            user_password=True,
            custom=True,
            user_srp=True),
        supported_identity_providers=[
            aws_cognito.UserPoolClientIdentityProvider.COGNITO
        ])

    authorizer = aws_apigateway.CognitoUserPoolsAuthorizer(
        self, 'AuthorizerForHelloWorldApi', cognito_user_pools=[pool])

    hello_fn = aws_lambda.Function(
        self,
        'HelloWorldLambdaFn',
        runtime=aws_lambda.Runtime.PYTHON_3_9,
        function_name="HelloWorldApi",
        handler="helloworld.lambda_handler",
        description='Function that returns 200 with "Hello world!"',
        code=aws_lambda.Code.from_asset(
            os.path.join(os.path.dirname(__file__), 'src/main/python')),
        timeout=cdk.Duration.minutes(5))

    # Non-proxy REST API deployed to a "v1" stage.
    rest_api = aws_apigateway.LambdaRestApi(
        self,
        'HelloWorldLambdaRestApi',
        rest_api_name="helloworld-api",
        handler=hello_fn,
        proxy=False,
        deploy=True,
        deploy_options=aws_apigateway.StageOptions(stage_name="v1"),
        endpoint_export_name='ApiGatewayRestApiEndpoint')

    # GET /hello, guarded by the Cognito authorizer above.
    hello_resource = rest_api.root.add_resource("hello")
    hello_resource.add_method(
        'GET',
        aws_apigateway.LambdaIntegration(handler=hello_fn),
        authorization_type=aws_apigateway.AuthorizationType.COGNITO,
        authorizer=authorizer)

    cdk.CfnOutput(self, 'UserPoolId', value=pool.user_pool_id)
    cdk.CfnOutput(self, 'UserPoolClientId',
                  value=pool_client.user_pool_client_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Unicorn Pursuit stack: S3 website bucket, DynamoDB tables, Cognito
    user pool + app client, and an ECS Fargate service behind an ALB with
    a NAT EC2 instance for the private subnets.
    """
    super().__init__(scope, id, **kwargs)

    # Let's start with creating an IAM Service Role, later to be assumed by our ECS Fargate Container
    # After creating any resource, we'll be attaching IAM policies to this role using the `fargate_role`.
    fargate_role = iam.Role(
        self,
        "ecsTaskExecutionRole",
        role_name="ecsTaskExecutionRole",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="Custom Role assumed by ECS Fargate (container)"
    )

    # S3: Create a Bucket for Unicorn Pursuit web page, and grant public read:
    bucket = s3.Bucket(self, "www.unicornpursuit.com",
                       bucket_name="www.unicornpursuit.com",
                       access_control=s3.BucketAccessControl.PUBLIC_READ,
                       )
    # Grant public read access to the bucket
    bucket.grant_public_access()

    # Grant S3 Read/Write access to our Fargate Container.
    # BUG FIX: object-level actions (GetObject/PutObject/...) apply to the
    # object ARN ("arn:.../bucket/*"), not the bucket ARN, so the container
    # previously could not touch any objects.
    fargate_role.add_to_policy(statement=iam.PolicyStatement(
        resources=[bucket.bucket_arn, bucket.arn_for_objects("*")],
        actions=["s3:*"]
    ))

    # DynamoDB: Create Table for Project Info (ID, Owner, Content, Photo and Votes)
    voting_ddb = ddb.CfnTable(
        self, "UnicornDynamoDBVoting",
        table_name="UnicornDynamoDBVoting",
        key_schema=[
            ddb.CfnTable.KeySchemaProperty(attribute_name="id", key_type="HASH"),
            ddb.CfnTable.KeySchemaProperty(attribute_name="owner", key_type="RANGE"),
        ],
        # In the new DynamoDB, you can't create AttDefProperty for non-key attributes.
        attribute_definitions=[
            ddb.CfnTable.AttributeDefinitionProperty(attribute_name="id", attribute_type="N"),
            ddb.CfnTable.AttributeDefinitionProperty(attribute_name="owner", attribute_type="S"),
        ],
        provisioned_throughput=ddb.CfnTable.ProvisionedThroughputProperty(
            read_capacity_units=5,
            write_capacity_units=5
        )
    )

    # Second DynamoDB table called "users" for storing who voted for whom
    # Example: [email protected] gave 5 points to project 323, 4 points to 111 etc.
    users_ddb = ddb.Table(
        self, "UnicornDynamoDBUsers",
        table_name="UnicornDynamoDBUsers",
        partition_key=ddb.Attribute(
            name="Owner",
            type=ddb.AttributeType.STRING
        )
    )

    # Grant RW writes for Unicorn App in Fargate
    fargate_role.add_to_policy(statement=iam.PolicyStatement(
        resources=[voting_ddb.attr_arn, users_ddb.table_arn],
        actions=["dynamodb:*"]
    ))

    # Cognito: Create User Pool
    userpool = cognito.UserPool(
        self, "CognitoUnicornUserPool",
        user_pool_name="CognitoUnicornUserPool",
        self_sign_up_enabled=True,

        ## Users sign in with their email address (not a username).
        sign_in_aliases=cognito.SignInAliases(
            username=False,
            email=True,
        ),
        # Require users to give their full name when signing up
        required_attributes=cognito.RequiredAttributes(
            fullname=True,
            email=True,
            phone_number=True
        ),
        # Auto-verify phone numbers only (email is NOT auto-verified);
        # verification happens via the SMS OTP below.
        auto_verify=cognito.AutoVerifiedAttrs(
            email=False,
            phone=True,
        ),
        # Configure OTP Settings ()
        user_verification=cognito.UserVerificationConfig(
            sms_message="Hey Unicorn Hunter, welcome to Unicorn Pursuit! Your OTP is {####}",
        ),
        # Set up required password policy
        password_policy=cognito.PasswordPolicy(
            min_length=12,
            require_symbols=True,
            require_lowercase=True,
            require_uppercase=True,
            require_digits=True,
        )
    )

    ## Cognito: Create App Client & create Authentication Flow with User and Password
    userpool.add_client(
        "UnicornAppClient",
        user_pool_client_name="UnicornAppClient",
        generate_secret=False,

        ## We'll allow both Flows, Implicit and Authorization Code, and decide in the app which to use.
        auth_flows=cognito.AuthFlow(
            admin_user_password=False,
            custom=False,
            refresh_token=True,
            user_password=True,
            user_srp=False
        ),
    )

    # Grant Cognito Access to Fargate. Include SSM, so Client App ID can be retrived.
    fargate_role.add_to_policy(statement=iam.PolicyStatement(
        resources=["*"],
        actions=["ssm:*"]
    ))

    fargate_role.add_to_policy(statement=iam.PolicyStatement(
        resources=[userpool.user_pool_arn],
        actions=["cognito-identity:*", "cognito-idp:*", "cognito-sync:*"]
    ))

    ## Fargate: Create ECS:Fargate with ECR uploaded image
    # NOTE(review): nat_gateways=None is the same as omitting the argument
    # (CDK default), so managed NAT gateways may still be created alongside
    # the NAT instance below — confirm intended topology.
    vpc = ec2.Vpc(self, "UnicornVPC", max_azs=2, nat_gateways=None)

    """ VPC with optimal NAT GW usage
    vpc_lowcost = ec2.Vpc(self, "LowCostVPC", max_azs=2, cidr="10.7.0.0/16", nat_gateways=None,
        subnet_configuration=[ec2.SubnetConfiguration(
            subnet_type=ec2.SubnetType.PUBLIC,
            name="Public",
            cidr_mask=24,
            ), ec2.SubnetConfiguration(
            subnet_type=ec2.SubnetType.ISOLATED,
            name="Private",
            cidr_mask=24,
            )
        ],
    )
    """

    linux_ami = ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
                                     edition=ec2.AmazonLinuxEdition.STANDARD,
                                     virtualization=ec2.AmazonLinuxVirt.HVM,
                                     storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
                                     )

    # Small EC2 instance in a public subnet acting as a NAT box
    # (source/dest check disabled so it can forward traffic).
    nat_ec2 = ec2.Instance(self, "NAT",
                           instance_name="NAT",
                           vpc=vpc,
                           vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                           instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"),
                           machine_image=linux_ami,
                           user_data=ec2.UserData.for_linux(),
                           source_dest_check=False,
                           )

    # Configure Linux Instance to act as NAT Instance
    nat_ec2.user_data.add_commands("sysctl -w net.ipv4.ip_forward=1")
    nat_ec2.user_data.add_commands("/sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE")

    # Add a static route to the private subnets, pointing 0.0.0.0/0 to the NAT EC2
    selection = vpc.select_subnets(
        subnet_type=ec2.SubnetType.PRIVATE
    )
    for subnet in selection.subnets:
        subnet.add_route("DefaultNAT",
                         router_id=nat_ec2.instance_id,
                         router_type=ec2.RouterType.INSTANCE,
                         destination_cidr_block="0.0.0.0/0")

    # Create ECS Cluster
    cluster = ecs.Cluster(self, "UnicornCluster", vpc=vpc)

    ecr.Repository(self, "unicorn", repository_name="unicorn")

    fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, "UnicornFargateService",
        cluster=cluster,
        cpu=512,
        desired_count=1,
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry("057097267726.dkr.ecr.eu-west-1.amazonaws.com/unicorn"),
            # image=ecs.ContainerImage.from_registry(repo.repository_uri_for_tag()),
            container_port=8080,
            container_name="unicorn",
            execution_role=fargate_role,
        ),
        memory_limit_mib=1024,
        public_load_balancer=True
    )

    fargate_service.service.connections.security_groups[0].add_ingress_rule(
        peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=ec2.Port.tcp(8080),
        description="Allow http inbound from VPC"
    )

    # Update NAT EC2 Security Group, to allow only HTTPS from Fargate Service Security Group.
    nat_ec2.connections.security_groups[0].add_ingress_rule(
        peer=fargate_service.service.connections.security_groups[0],
        connection=ec2.Port.tcp(443),
        description="Allow https from Fargate Service"
    )

    # Grant ECR Access to Fargate by attaching an existing ReadOnly policy. so that Unicorn Docker Image can be pulled.
    # fargate_role.add_managed_policy(iam.ManagedPolicy("AmazonEC2ContainerRegistryReadOnly"))
    fargate_role.add_to_policy(statement=iam.PolicyStatement(
        resources=["*"],
        actions=["ecr:*"]
    ))
def __init__(self, scope: core.Construct, id: str, **kwargs):
    """Provision two parallel Cognito setups — an L2 ``UserPool`` ("2"
    suffixed resources) and an L1 ``CfnUserPool`` — each with an app client
    and identity pool, and publish the L1 pool's IDs to SSM parameters.
    """
    super().__init__(scope, id, **kwargs)
    env_name = self.node.try_get_context('env')

    # L2 user pool: email/phone sign-in, email auto-verified, one mutable
    # custom attribute.
    user_pool2 = cognito.UserPool(
        self,
        id=f'{env_name}-precog',
        auto_verify=cognito.AutoVerifiedAttrs(email=True),
        sign_in_aliases=cognito.SignInAliases(email=True, phone=True),
        self_sign_up_enabled=True,
        user_pool_name=f'{env_name}-cdk-2-user-pool',
        custom_attributes={
            "param1": cognito.StringAttribute(mutable=True)
        },
        password_policy=cognito.PasswordPolicy(min_length=10,
                                               require_lowercase=True,
                                               require_digits=True,
                                               require_symbols=False,
                                               require_uppercase=True))

    # Equivalent L1 (CloudFormation-level) user pool.
    user_pool = cognito.CfnUserPool(
        self,
        id=f'{env_name}-cognito-user-pool',
        auto_verified_attributes=['email'],
        username_attributes=['email', 'phone_number'],
        user_pool_name=f'{env_name}-cdk-user-pool',
        schema=[{
            "attributeDataType": "String",
            "name": "param1",
            "mutable": True
        }],
        policies=cognito.CfnUserPool.PoliciesProperty(
            password_policy=cognito.CfnUserPool.PasswordPolicyProperty(
                minimum_length=10,
                require_lowercase=True,
                require_numbers=True,
                require_symbols=False,
                require_uppercase=True)))

    user_pool_client2 = cognito.UserPoolClient(
        self,
        id=f'{env_name}-pool-client2',
        user_pool=user_pool2,
        user_pool_client_name=f'{env_name}-cdk-app-client2')

    identity_pool2 = cognito.CfnIdentityPool(
        self,
        id=f'{env_name}-identify-pool-2',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[
            cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                client_id=user_pool_client2.user_pool_client_id,
                # BUG FIX: user_pool_client2 belongs to user_pool2, so the
                # provider must be user_pool2's — it previously pointed at
                # the unrelated CfnUserPool, a client/provider mismatch.
                provider_name=user_pool2.user_pool_provider_name)
        ],
        identity_pool_name=f'{env_name}-cdk-identity-pool2')

    user_pool_client = cognito.CfnUserPoolClient(
        self,
        id=f'{env_name}-pool-client',
        user_pool_id=user_pool.ref,
        client_name=f'{env_name}-cdk-app-client')

    identity_pool = cognito.CfnIdentityPool(
        self,
        id=f'{env_name}-identify-pool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[
            cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                client_id=user_pool_client.ref,
                provider_name=user_pool.attr_provider_name)
        ],
        identity_pool_name=f'{env_name}-cdk-identity-pool')

    # Expose the L1 pool's identifiers for other stacks / the application.
    ssm.StringParameter(
        self,
        id='app-id',
        parameter_name=f"/{env_name}/cognito-app-client-id",
        string_value=user_pool_client.ref)
    ssm.StringParameter(self,
                        id='user-pool-id',
                        parameter_name=f"/{env_name}/cognito-user-pool-id",
                        string_value=user_pool_client.user_pool_id)
    ssm.StringParameter(
        self,
        id='identity-pool-id',
        parameter_name=f"/{env_name}/cognito-identity-pool-id",
        string_value=identity_pool.ref  # ref returns the id
    )
def base_cognito_user_pool(construct, **kwargs):
    """Create a Cognito user pool — and optionally an app client — from a config dict.

    :param construct: Custom construct that will use this function. From the
        external construct it is usually 'self'; must expose ``prefix`` and
        ``environment_`` attributes used to build resource names.
    :param kwargs: Requires 'pool_name' and 'sign_up'. Optional keys:
        'email', 'password_policy', 'invitation', 'attributes', 'sign_in',
        'triggers', 'app_client'.
    :return: Tuple of (UserPool, UserPoolClient); the client is None unless
        ``app_client.enabled`` is True.
    """
    user_pool_name = construct.prefix + "_" + kwargs[
        "pool_name"] + "_pool_" + construct.environment_

    # Optional custom FROM / reply-to addresses for pool emails.
    if kwargs.get("email") is not None:
        email_settings = cognito.EmailSettings(
            from_=kwargs["email"]["from"],
            reply_to=kwargs["email"].get("reply_to"))
    else:
        email_settings = None

    # "or {}" guards: previously a missing optional section raised
    # AttributeError on the subsequent .get() calls.
    password_policy_settings = kwargs.get("password_policy") or {}
    temporary_password_validation_time = (
        core.Duration.days(
            password_policy_settings.get("temporary_password_duration"))
        if password_policy_settings.get("temporary_password_duration")
        is not None else None)
    password_policy = cognito.PasswordPolicy(
        min_length=password_policy_settings.get("minimum_length"),
        temp_password_validity=temporary_password_validation_time,
        require_lowercase=password_policy_settings.get("require",
                                                       {}).get("lower_case"),
        require_uppercase=password_policy_settings.get("require",
                                                       {}).get("upper_case"),
        require_digits=password_policy_settings.get("require",
                                                    {}).get("digits"),
        require_symbols=password_policy_settings.get("require",
                                                     {}).get("symbols"),
    )

    sign_up_info = kwargs["sign_up"]
    self_sign_up = sign_up_info["enabled"]
    user_verification_info = base_user_verification(
        sign_up_info=sign_up_info["user_verification"])

    user_invitation = kwargs.get("invitation") or {}
    user_invitation_configuration = cognito.UserInvitationConfig(
        email_subject=user_invitation.get("email", {}).get("subject"),
        email_body=user_invitation.get("email", {}).get("body"),
        sms_message=user_invitation.get("sms", {}).get("body"),
    )

    trigger_functions = kwargs.get("triggers", {})
    lambda_triggers = base_lambda_triggers(construct,
                                           trigger_functions=trigger_functions)

    # Each listed alias (e.g. "email", "username") becomes a True flag.
    sign_in_list = (kwargs.get("sign_in") or {}).get("order", list())
    sign_in_kwargs = {element: True for element in sign_in_list}
    sign_in_aliases = cognito.SignInAliases(**sign_in_kwargs)

    attributes = kwargs.get("attributes") or {}
    standard_attributes_dict = {
        element["name"]: cognito.StandardAttribute(
            mutable=element.get("mutable"),
            required=element.get("required"))
        for element in attributes.get("standard", list())
    }
    standard_attributes = cognito.StandardAttributes(
        **standard_attributes_dict)

    custom_attributes_list = attributes.get("custom", list())
    if len(custom_attributes_list) > 0:
        custom_attributes = base_custom_attributes(
            custom_attributes_list=custom_attributes_list)
    else:
        custom_attributes = None

    user_pool = cognito.UserPool(
        construct,
        id=user_pool_name,
        user_pool_name=user_pool_name,
        email_settings=email_settings,
        password_policy=password_policy,
        self_sign_up_enabled=self_sign_up,
        user_verification=user_verification_info,
        user_invitation=user_invitation_configuration,
        sign_in_aliases=sign_in_aliases,
        standard_attributes=standard_attributes,
        custom_attributes=custom_attributes,
        lambda_triggers=lambda_triggers,
    )

    # Optional app client attached to the pool.
    user_pool_client = None
    if kwargs.get("app_client", {}).get("enabled") is True:
        client_name = kwargs["app_client"]["client_name"]
        generate_secret = kwargs["app_client"]["generate_secret"]
        user_pool_client_name = construct.prefix + "_" + client_name + "_client_" + construct.environment_

        auth_flows = None
        auth_flows_configuration = kwargs["app_client"].get("auth_flows")
        if auth_flows_configuration is not None:
            auth_flows = cognito.AuthFlow(**auth_flows_configuration)

        user_pool_client = cognito.UserPoolClient(
            construct,
            id=user_pool_client_name,
            user_pool_client_name=user_pool_client_name,
            generate_secret=generate_secret,
            auth_flows=auth_flows,
            user_pool=user_pool,
        )

    return user_pool, user_pool_client
def add_cognito(self):
    """
    Sets up the cognito infrastructure with the user pool, custom domain
    and app client for use by the ALB.

    Side effects: sets self.user_pool, self.user_pool_custom_domain,
    self.user_pool_client, self.user_pool_full_domain,
    self.user_pool_logout_url and self.user_pool_user_info_url.
    """
    # Create the user pool that holds our users.
    # Email + phone are auto-verified; email, given and family name are
    # required, mutable standard attributes.
    self.user_pool = cognito.UserPool(
        self,
        "user-pool",
        account_recovery=cognito.AccountRecovery.
        EMAIL_AND_PHONE_WITHOUT_MFA,
        auto_verify=cognito.AutoVerifiedAttrs(email=True, phone=True),
        self_sign_up_enabled=True,
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(mutable=True, required=True),
            given_name=cognito.StandardAttribute(mutable=True,
                                                 required=True),
            family_name=cognito.StandardAttribute(mutable=True,
                                                  required=True)))

    # Add a lambda function that automatically confirms new users without
    # email/phone verification, just for this demo
    auto_confirm_function = _lambda.Function(
        self,
        "auto-confirm-function",
        code=_lambda.Code.from_asset(path=os.path.join(
            os.path.dirname(__file__), "..", "auto_confirm_function")),
        handler="lambda_handler.lambda_handler",
        runtime=_lambda.Runtime.PYTHON_3_8,
    )
    # Run the auto-confirm Lambda before every sign-up completes.
    self.user_pool.add_trigger(
        operation=cognito.UserPoolOperation.PRE_SIGN_UP,
        fn=auto_confirm_function)

    # Add a custom domain for the hosted UI
    self.user_pool_custom_domain = self.user_pool.add_domain(
        "user-pool-domain",
        cognito_domain=cognito.CognitoDomainOptions(
            domain_prefix=self.config.cognito_custom_domain))

    # Create an app client that the ALB can use for authentication
    # (client secret required for the ALB's authorization-code flow).
    self.user_pool_client = self.user_pool.add_client(
        "alb-app-client",
        user_pool_client_name="AlbAuthentication",
        generate_secret=True,
        o_auth=cognito.OAuthSettings(
            callback_urls=[
                # This is the endpoint where the ALB accepts the
                # response from Cognito
                f"https://{self.config.application_dns_name}/oauth2/idpresponse",
                # This is here to allow a redirect to the login page
                # after the logout has been completed
                f"https://{self.config.application_dns_name}"
            ],
            flows=cognito.OAuthFlows(authorization_code_grant=True),
            scopes=[cognito.OAuthScope.OPENID]),
        supported_identity_providers=[
            cognito.UserPoolClientIdentityProvider.COGNITO
        ])

    # Logout URLs and redirect URIs can't be set in CDK constructs natively ...yet,
    # so drop to the L1 (Cfn) resource. "logout_ur_ls" is the jsii-generated
    # property name for LogoutURLs.
    user_pool_client_cf: cognito.CfnUserPoolClient = self.user_pool_client.node.default_child
    user_pool_client_cf.logout_ur_ls = [
        # This is here to allow a redirect to the login page
        # after the logout has been completed
        f"https://{self.config.application_dns_name}"
    ]

    # Hosted-UI base URL of the custom domain, used to assemble the
    # logout and userInfo endpoints below.
    self.user_pool_full_domain = self.user_pool_custom_domain.base_url()
    # logout_uri must be URL-encoded since it is passed as a query param.
    redirect_uri = urllib.parse.quote('https://' +
                                      self.config.application_dns_name)
    self.user_pool_logout_url = f"{self.user_pool_full_domain}/logout?" \
        + f"client_id={self.user_pool_client.user_pool_client_id}&" \
        + f"logout_uri={redirect_uri}"

    self.user_pool_user_info_url = f"{self.user_pool_full_domain}/oauth2/userInfo"
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Unicorn Pursuit base resources: public S3 website bucket, two
    DynamoDB tables (votes + users) and a Cognito user pool with an
    app client configured for the USER_PASSWORD_AUTH flow.
    """
    super().__init__(scope, id, **kwargs)

    # Public-read bucket serving the Unicorn Pursuit web page.
    site_bucket = s3.Bucket(
        self,
        "www.unicornpursuit.com",
        bucket_name="www.unicornpursuit.com",
        access_control=s3.BucketAccessControl.PUBLIC_READ,
    )
    site_bucket.grant_public_access()

    # Votes table (L1 construct): composite key id (number) + owner (string).
    voting_table = ddb.CfnTable(
        self,
        "UnicornDynamoDBVoting",
        table_name="UnicornDynamoDBVoting",
        key_schema=[
            ddb.CfnTable.KeySchemaProperty(attribute_name="id",
                                           key_type="HASH"),
            ddb.CfnTable.KeySchemaProperty(attribute_name="owner",
                                           key_type="RANGE"),
        ],
        # Only key attributes may appear in attribute_definitions.
        attribute_definitions=[
            ddb.CfnTable.AttributeDefinitionProperty(attribute_name="id",
                                                     attribute_type="N"),
            ddb.CfnTable.AttributeDefinitionProperty(attribute_name="owner",
                                                     attribute_type="S"),
        ],
        provisioned_throughput=ddb.CfnTable.ProvisionedThroughputProperty(
            read_capacity_units=5, write_capacity_units=5))
    # Access grants for Fargate / Lambda callers are attached elsewhere.
    # voting.grant_read_write_data(user)

    # "users" table records who voted for whom, keyed by voter ("owner").
    users_table = ddb.Table(self,
                            "UnicornDynamoDBUsers",
                            table_name="UnicornDynamoDBUsers",
                            partition_key=ddb.Attribute(
                                name="owner",
                                type=ddb.AttributeType.STRING))

    # User pool: sign-in by email only; full name, email and phone are
    # required at sign-up; the phone number is auto-verified via the SMS
    # OTP message below (email is not auto-verified).
    user_pool = cognito.UserPool(
        self,
        "CognitoUnicornUserPool",
        user_pool_name="CognitoUnicornUserPool",
        self_sign_up_enabled=True,
        sign_in_aliases=cognito.SignInAliases(
            username=False,
            email=True,
        ),
        required_attributes=cognito.RequiredAttributes(fullname=True,
                                                       email=True,
                                                       phone_number=True),
        auto_verify=cognito.AutoVerifiedAttrs(
            email=False,
            phone=True,
        ),
        user_verification=cognito.UserVerificationConfig(
            sms_message=
            "Hey Unicorn Hunter, welcome to Unicorn Pursuit! Your OTP is {####}",
        ),
        # Strong password policy: 12+ chars, all character classes.
        password_policy=cognito.PasswordPolicy(
            min_length=12,
            require_symbols=True,
            require_lowercase=True,
            require_uppercase=True,
            require_digits=True,
        ))

    # App client without a secret; only USER_PASSWORD_AUTH (plus refresh
    # tokens) is enabled — the app decides which flow to use.
    app_client = user_pool.add_client(
        "UnicornAppClient",
        user_pool_client_name="UnicornAppClient",
        generate_secret=False,
        auth_flows=cognito.AuthFlow(admin_user_password=False,
                                    custom=False,
                                    refresh_token=True,
                                    user_password=True,
                                    user_srp=False),
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Fargate web app behind an ALB that authenticates users via Cognito.

    Creates: Route53/ACM certificate for APP_DNS_NAME, a 2-AZ VPC with an
    ECS cluster, a Cognito user pool + hosted domain + OAuth app client,
    an ALB-fronted Fargate service built from a local Docker image, and an
    ALB listener rule that runs authenticate-cognito before forwarding.
    """
    super().__init__(scope, id, **kwargs)

    # Get the hosted zone and create a DNS-validated certificate for our domain
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=HOSTED_ZONE_ID,
        zone_name=HOSTED_ZONE_NAME)

    cert = certificatemanager.DnsValidatedCertificate(
        self,
        "Certificate",
        hosted_zone=hosted_zone,
        domain_name=APP_DNS_NAME)

    # Set up a new VPC
    vpc = ec2.Vpc(self, "FargateDemoVpc", max_azs=2)

    # Set up an ECS Cluster for fargate
    cluster = ecs.Cluster(self, "FargateCluster", vpc=vpc)

    # Configure the user pool and related entities for authentication
    user_pool = cognito.UserPool(
        self,
        "UserPool",
        self_sign_up_enabled=True,
        user_pool_name="FargateDemoUserPool",
    )
    user_pool_custom_domain = cognito.CfnUserPoolDomain(
        self,
        "CustomDomain",
        domain=COGNITO_CUSTOM_DOMAIN,
        user_pool_id=user_pool.user_pool_id)
    # A client secret is required for the ALB authenticate-cognito action.
    user_pool_client = cognito.UserPoolClient(
        self,
        "AppClient",
        user_pool=user_pool,
        user_pool_client_name="AlbAuthentication",
        generate_secret=True)

    # Set the attributes on the user pool client that can't be updated via
    # the construct: drop to the L1 CfnUserPoolClient escape hatch.
    user_pool_client_cf: cognito.CfnUserPoolClient = user_pool_client.node.default_child
    # Authorization-code grant with the openid scope.
    user_pool_client_cf.allowed_o_auth_flows = ["code"]
    user_pool_client_cf.allowed_o_auth_scopes = ["openid"]
    # The ALB's OAuth2 callback endpoint plus the app root.
    user_pool_client_cf.callback_ur_ls = [
        f"https://{APP_DNS_NAME}/oauth2/idpresponse", f"https://{APP_DNS_NAME}"
    ]
    user_pool_client_cf.default_redirect_uri = f"https://{APP_DNS_NAME}/oauth2/idpresponse"
    user_pool_client_cf.logout_ur_ls = [
        f"https://{APP_DNS_NAME}/logout", f"https://{APP_DNS_NAME}/"
    ]
    user_pool_client_cf.supported_identity_providers = [
        # This is where you'd add external identity providers as well.
        "COGNITO"
    ]
    user_pool_client_cf.allowed_o_auth_flows_user_pool_client = True

    # Define the Docker Image for our container (the CDK will do the build
    # and push for us!)
    docker_image = ecr_assets.DockerImageAsset(
        self,
        "JwtApp",
        directory=os.path.join(os.path.dirname(__file__), "..", "src"))

    # Hosted UI endpoint for the pool's custom domain in this region.
    user_pool_domain = f"{user_pool_custom_domain.domain}.auth.{self.region}.amazoncognito.com"

    # Define the fargate service + ALB
    fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "FargateService",
        cluster=cluster,
        certificate=cert,
        domain_name=f"{APP_DNS_NAME}",
        domain_zone=hosted_zone,
        task_image_options={
            "image": ecs.ContainerImage.from_docker_image_asset(docker_image),
            "environment": {
                "PORT": "80",
                # Cognito logout URL that redirects back to the app root.
                "LOGOUT_URL":
                f"https://{user_pool_domain}/logout?" +
                f"client_id={user_pool_client.user_pool_client_id}&" +
                f"redirect_uri={ urllib.parse.quote(f'https://{APP_DNS_NAME}')}&"
                + f"response_type=code&state=STATE&scope=openid"
            }
        })

    # Add an additional HTTPS egress rule to the Load Balancer's security
    # group to talk to Cognito (the authenticate action calls the token
    # endpoint from the ALB itself).
    lb_security_group = fargate_service.load_balancer.connections.security_groups[
        0]
    lb_security_group.add_egress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation="443",
                            from_port=443,
                            to_port=443),
        description="Outbound HTTPS traffic to get to Cognito")

    # Allow 10 seconds for in flight requests before termination; the
    # default of 5 minutes is much too high.
    fargate_service.target_group.set_attribute(
        key="deregistration_delay.timeout_seconds", value="10")

    # Enable authentication on the Load Balancer: insert an
    # authenticate-cognito action ahead of the forward action for requests
    # matching our host header.
    alb_listener: elb.CfnListener = fargate_service.listener.node.default_child
    elb.CfnListenerRule(
        self,
        "AuthenticateRule",
        actions=[{
            "type": "authenticate-cognito",
            "authenticateCognitoConfig":
            elb.CfnListenerRule.AuthenticateCognitoConfigProperty(
                user_pool_arn=user_pool.user_pool_arn,
                user_pool_client_id=user_pool_client.user_pool_client_id,
                user_pool_domain=user_pool_custom_domain.domain),
            "order": 1
        }, {
            "type": "forward",
            "order": 10,
            "targetGroupArn": fargate_service.target_group.target_group_arn
        }],
        conditions=[{
            "field": "host-header",
            "hostHeaderConfig": {
                "values": [f"{APP_DNS_NAME}"]
            }
        }],
        # Reference the Listener ARN
        listener_arn=alb_listener.ref,
        priority=1000)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Image-processing workflow stack.

    S3 landing bucket -> Lambda manifest parser -> SQS fan-out queues ->
    Lambda/ECS consumers (Rekognition / Comprehend), with results indexed
    into an Elasticsearch domain.  Also provisions the VPC, ECS cluster,
    ECR repositories, IAM roles and a private Route53 zone.
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    # Parses an uploaded image-list file; extra memory for large manifests.
    parse_image_list_file = aws_lambda.Function(
        self,
        'parse_image_list_file',
        handler='parse_image_list_file.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('parse_image_list_file'),
        memory_size=10240,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    # Lists bucket objects and enqueues them onto the object queue.
    list_objects = aws_lambda.Function(
        self,
        'list_objects',
        handler='list_objects.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('list_objects'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    # Consumes the object queue and stores object metadata.
    get_size_and_store = aws_lambda.Function(
        self,
        'get_size_and_store',
        handler='get_size_and_store.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('get_size_and_store'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    images_bucket = aws_s3.Bucket(self, "images_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    # Shared by all three Lambdas: S3 read plus full SQS/ES access.
    # NOTE(review): "sqs:*" / "es:*" on resource "*" is broad - consider
    # scoping to the queues and domain created below.
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
        resources=["*"])

    parse_image_list_file.add_to_role_policy(
        lambda_supplemental_policy_statement)
    list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
    get_size_and_store.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    # notification_topic = aws_sns.Topic(self, "notification_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    # Every new object in the images bucket triggers the manifest parser.
    images_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.LambdaDestination(parse_image_list_file))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each work queue gets a dead-letter queue after 10 failed receives.
    # Visibility timeout (301s) is one second above the Lambda timeout
    # (300s), as required for SQS event-source mappings.
    comprehend_queue_iqueue = aws_sqs.Queue(self, "comprehend_queue_iqueue")
    comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=comprehend_queue_iqueue)
    comprehend_queue = aws_sqs.Queue(
        self,
        "comprehend_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=comprehend_queue_iqueue_dlq)

    rekognition_queue_iqueue = aws_sqs.Queue(self,
                                             "rekognition_queue_iqueue")
    rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=rekognition_queue_iqueue)
    rekognition_queue = aws_sqs.Queue(
        self,
        "rekognition_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=rekognition_queue_dlq)

    object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
    object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                               queue=object_queue_iqueue)
    object_queue = aws_sqs.Queue(
        self,
        "object_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=object_queue_dlq)

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    get_size_and_store.add_event_source(
        SqsEventSource(object_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    # 3 dedicated masters + 4 data nodes across 2 AZs, 100 GiB EBS each,
    # with slow-search / application / slow-index logging enabled.
    s3workflow_domain = aws_elasticsearch.Domain(
        self,
        "s3workflow_domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # Sign-in by email or username.  All the explicit None arguments are
    # no-ops kept from the original authoring style - every other setting
    # is at its CDK default.
    s3workflow_pool = aws_cognito.UserPool(
        self,
        "s3workflow-pool",
        account_recovery=None,
        auto_verify=None,
        custom_attributes=None,
        email_settings=None,
        enable_sms_role=None,
        lambda_triggers=None,
        mfa=None,
        mfa_second_factor=None,
        password_policy=None,
        self_sign_up_enabled=None,
        sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                  phone=None,
                                                  preferred_username=None,
                                                  username=True),
        sign_in_case_sensitive=None,
        sms_role=None,
        sms_role_external_id=None,
        standard_attributes=None,
        user_invitation=None,
        user_pool_name=None,
        user_verification=None)

    ###########################################################################
    # AMAZON VPC
    ###########################################################################
    vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                      max_azs=3)  # default is all AZs in region

    ###########################################################################
    # AMAZON ECS CLUSTER
    ###########################################################################
    cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

    ###########################################################################
    # AMAZON ECS Repositories
    ###########################################################################
    # Image scanning on push; repositories are destroyed with the stack.
    rekognition_repository = aws_ecr.Repository(
        self,
        "rekognition_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy("DESTROY"))
    comprehend_repository = aws_ecr.Repository(
        self,
        "comprehend_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy("DESTROY"))

    ###########################################################################
    # AMAZON ECS Roles and Policies
    ###########################################################################
    # Execution role: lets ECS pull images, write logs, wire networking.
    task_execution_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
        ],
        resources=["*"])

    task_execution_policy_document = aws_iam.PolicyDocument()
    task_execution_policy_document.add_statements(
        task_execution_policy_statement)

    task_execution_policy = aws_iam.Policy(
        self,
        "task_execution_policy",
        document=task_execution_policy_document)

    task_execution_role = aws_iam.Role(
        self,
        "task_execution_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_execution_role.attach_inline_policy(task_execution_policy)

    # Task role: runtime permissions used by the containers themselves.
    task_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
            "comprehend:*", "es:*"
        ],
        resources=["*"])

    task_policy_document = aws_iam.PolicyDocument()
    task_policy_document.add_statements(task_policy_statement)

    task_policy = aws_iam.Policy(self,
                                 "task_policy",
                                 document=task_policy_document)

    task_role = aws_iam.Role(
        self,
        "task_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_role.attach_inline_policy(task_policy)

    ###########################################################################
    # AMAZON ECS Task definitions
    ###########################################################################
    rekognition_task_definition = aws_ecs.TaskDefinition(
        self,
        "rekognition_task_definition",
        compatibility=aws_ecs.Compatibility("FARGATE"),
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode("AWS_VPC"),
        # pid_mode=None,  # Not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    comprehend_task_definition = aws_ecs.TaskDefinition(
        self,
        "comprehend_task_definition",
        compatibility=aws_ecs.Compatibility("FARGATE"),
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode("AWS_VPC"),
        # pid_mode=None,  # Not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    ###########################################################################
    # AMAZON ECS Images
    ###########################################################################
    # Both services track the "latest" tag of their repository.
    rekognition_ecr_image = aws_ecs.EcrImage(
        repository=rekognition_repository, tag="latest")
    comprehend_ecr_image = aws_ecs.EcrImage(
        repository=comprehend_repository, tag="latest")

    ###########################################################################
    # ENVIRONMENT VARIABLES
    ###########################################################################
    # Shared container environment: queue URLs, bucket and ES endpoint.
    environment_variables = {}
    environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
    environment_variables[
        "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
    environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
    environment_variables[
        "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

    # Per-Lambda environment.  "-" values are placeholders overwritten at
    # invocation time.
    parse_image_list_file.add_environment(
        "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
    parse_image_list_file.add_environment("QUEUEURL",
                                          rekognition_queue.queue_url)
    parse_image_list_file.add_environment("DEBUG", "False")
    parse_image_list_file.add_environment("BUCKET", "-")
    parse_image_list_file.add_environment("KEY", "-")

    list_objects.add_environment("QUEUEURL", object_queue.queue_url)
    list_objects.add_environment("ELASTICSEARCH_HOST",
                                 s3workflow_domain.domain_endpoint)
    list_objects.add_environment("S3_BUCKET_NAME",
                                 images_bucket.bucket_name)
    list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
    list_objects.add_environment("S3_BUCKET_SUFFIX", "")
    list_objects.add_environment("LOGGING_LEVEL", "INFO")

    get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
    get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                       s3workflow_domain.domain_endpoint)
    get_size_and_store.add_environment("S3_BUCKET_NAME",
                                       images_bucket.bucket_name)
    get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
    get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
    get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

    ###########################################################################
    # ECS Log Drivers
    ###########################################################################
    rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays("ONE_DAY"))
    comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays("ONE_DAY"))

    ###########################################################################
    # ECS Task Definitions - container definitions
    ###########################################################################
    rekognition_task_definition.add_container(
        "rekognition_task_definition",
        image=rekognition_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=rekognition_task_log_driver)
    comprehend_task_definition.add_container(
        "comprehend_task_definition",
        image=comprehend_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=comprehend_task_log_driver)

    ###########################################################################
    # AWS ROUTE53 HOSTED ZONE
    ###########################################################################
    # Private zone, attached to the VPC below.
    hosted_zone = aws_route53.HostedZone(
        self,
        "hosted_zone",
        zone_name="s3workflow.com",
        comment="private hosted zone for s3workflow system")
    hosted_zone.add_vpc(vpc)
def __init__(self, scope: cdk.Construct, id: str, domain_name: str, **kwargs) -> None:
    """Cognito resources for ConsoleMe.

    Creates a user pool with a hosted Cognito domain, the
    ``consoleme_admins`` and ``consoleme_users`` groups, and a bootstrap
    admin user that is placed into the admin group.  The pool is exposed
    to other stacks as ``self.cognito_user_pool``.
    """
    super().__init__(scope, id, **kwargs)

    # User pool (deleted with the stack) plus its hosted domain.
    pool = cognito.UserPool(
        self, "UserPool", removal_policy=cdk.RemovalPolicy.DESTROY)

    cognito.UserPoolDomain(
        self,
        "UserPoolDomain",
        user_pool=pool,
        cognito_domain=cognito.CognitoDomainOptions(
            domain_prefix=APPLICATION_PREFIX + "-" + APPLICATION_SUFFIX),
    )

    def _group_resource(resource_id: str, group_name: str) -> cr.AwsCustomResource:
        # One-shot SDK call that creates a user-pool group at deploy time.
        return cr.AwsCustomResource(
            self,
            resource_id,
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="createGroup",
                parameters={
                    "UserPoolId": pool.user_pool_id,
                    "GroupName": group_name,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    id=resource_id),
            ),
        )

    # Bootstrap admin user, created via the AdminCreateUser SDK call with a
    # temporary password and the admin email derived from domain_name.
    admin_user = cr.AwsCustomResource(
        self,
        "UserPoolAdminUserResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="adminCreateUser",
            parameters={
                "UserPoolId": pool.user_pool_id,
                "Username": "******",
                "UserAttributes": [{
                    "Name": "email",
                    "Value": "consoleme_admin@" + domain_name
                }],
                "TemporaryPassword": ADMIN_TEMP_PASSWORD,
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                pool.user_pool_id),
        ),
    )

    admin_group = _group_resource("UserPoolAdminGroupResource",
                                  "consoleme_admins")
    _group_resource("UserPoolUserGroupResource", "consoleme_users")

    # Add the bootstrap admin into the admin group.
    assign_admin_group = cr.AwsCustomResource(
        self,
        "UserPoolAssignAdminGroupResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="adminAddUserToGroup",
            parameters={
                "UserPoolId": pool.user_pool_id,
                "GroupName": "consoleme_admins",
                "Username": "******",
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                id="UserPoolAssignAdminGroupResource"),
        ),
    )

    # The user and the group must both exist before the add-to-group call.
    assign_admin_group.node.add_dependency(admin_user)
    assign_admin_group.node.add_dependency(admin_group)

    self.cognito_user_pool = pool
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """CloudTrail-to-Elasticsearch log pipeline.

    CloudTrail writes events to an S3 bucket; S3 object-created events fan
    out through an SNS topic to two SQS queues; two Lambda consumers
    forward the logs to Elastic Cloud and to an Amazon ES domain.

    Fixes applied in this revision:
    - SQS visibility timeouts raised from 300s to 301s: Lambda rejects an
      SQS event-source mapping when the queue's visibility timeout is less
      than the function timeout (both consumers use 301s).
    - Removed the unused ``this_aws_account`` AccountPrincipal local and
      the unused binding of the Cognito user pool.
    - Corrected the mislabelled section header above the CloudTrail trail.
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS SECRETS MANAGER - Templated secret (disabled)
    ###########################################################################
    # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
    #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
    #         secret_string_template= "{\"username\":\"cleanbox\"}",
    #         generate_string_key="password"
    #     )
    # )

    ###########################################################################
    # CUSTOM CLOUDFORMATION RESOURCE (disabled)
    ###########################################################################
    # customlambda = aws_lambda.Function(self,'customconfig',
    # handler='customconfig.on_event',
    # runtime=aws_lambda.Runtime.PYTHON_3_7,
    # code=aws_lambda.Code.asset('customconfig'),
    # )
    # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None)
    # customlambda.add_to_role_policy(statement=customlambda_statement)
    # my_provider = cr.Provider(self, "MyProvider",
    # on_event_handler=customlambda,
    # # is_complete_handler=is_complete, # optional async "waiter"
    # log_retention=logs.RetentionDays.SIX_MONTHS
    # )
    # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    # Forwards queued CloudTrail events to Elastic Cloud.
    sqs_to_elastic_cloud = aws_lambda.Function(
        self,
        'sqs_to_elastic_cloud',
        handler='sqs_to_elastic_cloud.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
        memory_size=4096,
        timeout=core.Duration.seconds(301),
        log_retention=logs.RetentionDays.ONE_DAY)

    # Forwards queued CloudTrail events to the Amazon ES domain below.
    sqs_to_elasticsearch_service = aws_lambda.Function(
        self,
        'sqs_to_elasticsearch_service',
        handler='sqs_to_elasticsearch_service.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
        memory_size=4096,
        timeout=core.Duration.seconds(301),
        log_retention=logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    # Landing bucket for CloudTrail log files.
    cloudtrail_log_bucket = aws_s3.Bucket(self, "cloudtrail_log_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    # Shared by both Lambdas: S3 read plus Firehose/ES access.
    # NOTE(review): "firehose:*" / "es:*" on "*" is broad - consider scoping.
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*", "es:*"],
        resources=["*"])

    sqs_to_elastic_cloud.add_to_role_policy(
        lambda_supplemental_policy_statement)
    sqs_to_elasticsearch_service.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    cloudtrail_log_topic = aws_sns.Topic(self, "cloudtrail_log_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    # New CloudTrail objects are announced on the SNS topic, which fans
    # out to both consumer queues below.
    cloudtrail_log_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.SnsDestination(cloudtrail_log_topic))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each queue gets a dead-letter queue after 10 failed receives.  The
    # visibility timeout (301s) must be >= the consumer Lambda timeout
    # (301s) or Lambda refuses to create the event-source mapping.
    sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elasticsearch_service_queue_dlq")
    sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10,
        queue=sqs_to_elasticsearch_service_queue_iqueue)
    sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
        self,
        "sqs_to_elasticsearch_service_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

    sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elastic_cloud_queue_dlq")
    sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
    sqs_to_elastic_cloud_queue = aws_sqs.Queue(
        self,
        "sqs_to_elastic_cloud_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

    ###########################################################################
    # AWS SNS TOPIC SUBSCRIPTIONS
    ###########################################################################
    cloudtrail_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
    cloudtrail_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(
            sqs_to_elasticsearch_service_queue))

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    sqs_to_elastic_cloud.add_event_source(
        SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
    sqs_to_elasticsearch_service.add_event_source(
        SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    # 3 dedicated masters + 4 data nodes across 2 AZs, 100 GiB EBS each.
    # NOTE(review): no access policy is attached to the domain here.
    s3_to_elasticsearch_cloudtrail_logs_domain = aws_elasticsearch.Domain(
        self,
        "s3-to-elasticsearch-cloudtrail-logs-domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # Sign-in by email or username; everything else at CDK defaults (the
    # explicit None arguments are no-ops).  The construct is registered in
    # the stack tree; the instance itself is not referenced again.
    aws_cognito.UserPool(
        self,
        "s3-to-elasticsearch-cloudtrial-logs-pool",
        account_recovery=None,
        auto_verify=None,
        custom_attributes=None,
        email_settings=None,
        enable_sms_role=None,
        lambda_triggers=None,
        mfa=None,
        mfa_second_factor=None,
        password_policy=None,
        self_sign_up_enabled=None,
        sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                  phone=None,
                                                  preferred_username=None,
                                                  username=True),
        sign_in_case_sensitive=None,
        sms_role=None,
        sms_role_external_id=None,
        standard_attributes=None,
        user_invitation=None,
        user_pool_name=None,
        user_verification=None)

    ###########################################################################
    # LAMBDA ENVIRONMENT
    ###########################################################################
    sqs_to_elasticsearch_service.add_environment(
        "ELASTICSEARCH_HOST",
        s3_to_elasticsearch_cloudtrail_logs_domain.domain_endpoint)
    sqs_to_elasticsearch_service.add_environment(
        "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
    sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

    # "-" values are placeholders expected to be overridden per deployment.
    sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
    sqs_to_elastic_cloud.add_environment(
        "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
    sqs_to_elastic_cloud.add_environment("DEBUG", "False")

    ###########################################################################
    # AWS CLOUDTRAIL
    ###########################################################################
    # Multi-region trail capturing all management events into the bucket.
    allevents_trail = aws_cloudtrail.Trail(
        self,
        "allevents_trail",
        bucket=cloudtrail_log_bucket,
        cloud_watch_log_group=None,
        cloud_watch_logs_retention=None,
        enable_file_validation=None,
        encryption_key=None,
        include_global_service_events=None,
        is_multi_region_trail=True,
        kms_key=None,
        management_events=aws_cloudtrail.ReadWriteType("ALL"),
        s3_key_prefix=None,
        send_to_cloud_watch_logs=False,
        sns_topic=None,
        trail_name=None)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """BYOD data-validation web tool stack.

    Provisions source/target/web S3 buckets, a Node.js stager Lambda,
    Cognito user+identity pools for browser access, a DynamoDB job table
    fronted by an AppSync GraphQL API, and two queue-driven Fargate
    services (validation and profiling).
    """
    super().__init__(scope, id, **kwargs)

    ### S3 ###
    # Uploads land here; versioned so re-uploads are retained.
    source_csv_bucket = _s3.Bucket(self,
                                   "BYODValidationSourceBucket",
                                   versioned=True)

    # Staged/validated data; retained even if the stack is deleted.
    target_csv_bucket = _s3.Bucket(
        self,
        "BYODValidationTargetBucket",
        removal_policy=core.RemovalPolicy.RETAIN)

    # Static website bucket for the web tool SPA (index.html also serves
    # as the error document so client-side routing works).
    webtool_bucket = _s3.Bucket(
        self,
        "WebToolBucket",
        website_index_document="index.html",
        website_error_document="index.html",
        public_read_access=True,
    )

    core.CfnOutput(self, "DVTRegion", value=self.region)
    core.CfnOutput(self,
                   "SourceS3Bucket",
                   value=source_csv_bucket.bucket_name)
    core.CfnOutput(self,
                   "TargetS3Bucket",
                   value=target_csv_bucket.bucket_name)
    core.CfnOutput(self,
                   "WebToolS3Bucket",
                   value=webtool_bucket.bucket_name)
    core.CfnOutput(self,
                   "WebToolUrl",
                   value=webtool_bucket.bucket_website_url)

    ### Stager Function ###
    # Copies/stages objects from the source bucket into the target bucket.
    stager_function = _lambda.Function(self,
                                       "StagerFunction",
                                       runtime=_lambda.Runtime.NODEJS_12_X,
                                       code=_lambda.Code.from_asset(
                                           os.path.join(
                                               dirname, "lambda",
                                               "stager")),
                                       handler='index.handler')

    stager_function.add_environment("REGION", self.region)
    stager_function.add_environment("SOURCE_BUCKET",
                                    source_csv_bucket.bucket_name)
    stager_function.add_environment("STAGE_BUCKET",
                                    target_csv_bucket.bucket_name)
    source_csv_bucket.grant_read(stager_function)
    target_csv_bucket.grant_put(stager_function)
    core.CfnOutput(self,
                   "StagerLambdaFunction",
                   value=stager_function.function_name)

    ### Profiling Queue ###
    profiling_job_queue = _sqs.Queue(self, "ProfilingJobQueue")
    core.CfnOutput(self,
                   "SQSProfileQueue",
                   value=profiling_job_queue.queue_url)

    ### Cognito ###
    # Self-service sign-up, email auto-verification via a one-time code;
    # email is required and immutable once set.
    # NOTE(review): password_policy={} leaves the pool at default policy.
    userpool = _cognito.UserPool(
        self,
        "WebToolUserPool",
        user_pool_name="byod-webtool-userpool",
        self_sign_up_enabled=True,
        auto_verify={
            "email": True,
            "phone": False
        },
        user_verification={
            "email_subject": "Your verification code",
            "email_body": "Your verification code is {####}",
            "email_style": _cognito.VerificationEmailStyle.CODE
        },
        standard_attributes={
            "email": {
                "required": True,
                "mutable": False
            }
        },
        password_policy={})

    # App client for the browser: custom, user/password and SRP flows.
    client = userpool.add_client(
        "webtool-app-client",
        auth_flows={
            "custom": True,
            "user_password": True,
            "user_srp": True,
            #"refresh_token": True
        })

    # Identity pool bridging the user pool to AWS credentials; guests
    # (unauthenticated identities) are allowed.
    identity_pool = _cognito.CfnIdentityPool(
        self,
        "WebToolCognitoIdentityPool",
        allow_unauthenticated_identities=True)
    # Wire the user-pool client as the identity provider via a raw
    # property override (not exposed on the L1 props used here).
    identity_pool.add_property_override(
        "CognitoIdentityProviders", [{
            "ClientId": client.user_pool_client_id,
            "ProviderName": userpool.user_pool_provider_name
        }])

    # Role assumed by authenticated identities from this identity pool.
    auth_role = _iam.Role(
        self,
        "CognitoAuthRole",
        assumed_by=WebIdentityPrincipal(
            "cognito-identity.amazonaws.com", {
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                }
            }))
    # Authenticated users may read/write staged objects, invoke the
    # stager Lambda, and use the profiling queue.
    auth_role.add_to_policy(
        PolicyStatement(effect=Effect.ALLOW,
                        actions=["s3:GetObject", "s3:PutObject"],
                        resources=["%s/*" % target_csv_bucket.bucket_arn]))
    auth_role.add_to_policy(
        PolicyStatement(effect=Effect.ALLOW,
                        actions=["lambda:invokeFunction"],
                        resources=[stager_function.function_arn]))
    auth_role.add_to_policy(
        PolicyStatement(effect=Effect.ALLOW,
                        actions=["sqs:*"],
                        resources=[profiling_job_queue.queue_arn]))

    # Role assumed by unauthenticated (guest) identities; no policies
    # are attached, so guests get no permissions.
    unauth_role = _iam.Role(
        self,
        "CognitoUnauthRole",
        assumed_by=_iam.WebIdentityPrincipal(
            "cognito-identity.amazonaws.com",
            conditions={
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "unauthenticated"
                }
            }))

    identity_pool_policy = _cognito.CfnIdentityPoolRoleAttachment(
        self,
        "WebToolCognitoIdentityPoolPolicy",
        identity_pool_id=identity_pool.ref,
        roles={
            'unauthenticated': unauth_role.role_arn,
            'authenticated': auth_role.role_arn
        })

    core.CfnOutput(self, "UserPoolId", value=userpool.user_pool_id)
    core.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)
    core.CfnOutput(self, "ClientId", value=client.user_pool_client_id)
    core.CfnOutput(self,
                   "ProviderName",
                   value=userpool.user_pool_provider_name)

    ### DynamoDB ###
    # Validation job records, keyed by job id.
    validation_job_table = _dynamodb.Table(
        self,
        "ValidationJobTable",
        partition_key=_dynamodb.Attribute(
            name="id", type=_dynamodb.AttributeType.STRING))

    ### AppSync ###
    # GraphQL API authorized via the Cognito user pool above.
    api = _appsync.GraphqlApi(
        self,
        "Api",
        name="validation-job-api",
        schema=_appsync.Schema.from_asset(
            os.path.join(dirname, "api", "schema.graphql")),
        authorization_config=AuthorizationConfig(
            default_authorization=AuthorizationMode(
                authorization_type=AuthorizationType.USER_POOL,
                user_pool_config=UserPoolConfig(user_pool=userpool))),
        log_config=LogConfig(exclude_verbose_content=False,
                             field_log_level=FieldLogLevel.ALL))

    api_ds = api.add_dynamo_db_data_source("ValidationJobDataSource",
                                           validation_job_table)

    from aws_cdk.aws_appsync import MappingTemplate

    # VTL resolvers for the job queries.
    # NOTE(review): "listJobss" (double s) matches the .vtl file names on
    # disk and the schema - presumably intentional; verify before renaming.
    api_ds.create_resolver(
        type_name="Query",
        field_name="listJobss",
        request_mapping_template=MappingTemplate.from_file(
            os.path.join(dirname, "api", "resolvers",
                         "Query.listJobss.req.vtl")),
        response_mapping_template=MappingTemplate.from_file(
            os.path.join(dirname, "api", "resolvers",
                         "Query.listJobss.res.vtl")))
    api_ds.create_resolver(
        type_name="Query",
        field_name="getJobs",
        request_mapping_template=MappingTemplate.from_file(
            os.path.join(dirname, "api", "resolvers",
                         "Query.getJobs.req.vtl")),
        response_mapping_template=MappingTemplate.from_file(
            os.path.join(dirname, "api", "resolvers",
                         "Query.getJobs.res.vtl")))

    core.CfnOutput(self, "GraphQLEndpoint", value=api.graphql_url)

    ### SQS ###
    validation_job_queue = _sqs.Queue(self, "ValidationJobQueue")

    ### Lambda ###
    # Fired on every new source upload; records the job and enqueues it.
    validation_trigger_function = _lambda.Function(
        self,
        "ValidationTriggerFunction",
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            os.path.join(dirname, "lambda", "validation_trigger")),
        handler='lambda_function.lambda_handler')

    validation_trigger_function.add_environment(
        "TABLE_NAME", validation_job_table.table_name)
    validation_trigger_function.add_environment(
        "QUEUE_URL", validation_job_queue.queue_url)

    validation_trigger_function.add_event_source(
        _S3EventSource(source_csv_bucket,
                       events=[_s3.EventType.OBJECT_CREATED]))

    source_csv_bucket.grant_read(validation_trigger_function)
    validation_job_table.grant_read_write_data(validation_trigger_function)
    validation_job_queue.grant_send_messages(validation_trigger_function)

    ### ECS Fargate ###
    # Locally-built images for the two queue workers.
    validation_fargate_asset = _ecr_assets.DockerImageAsset(
        self,
        "ValidationBuildImage",
        directory=os.path.join(dirname, "fargate", "validation"))
    profiling_fargate_asset = _ecr_assets.DockerImageAsset(
        self,
        "ProfilingBuildImage",
        directory=os.path.join(dirname, "fargate", "profiling"))

    vpc = _ec2.Vpc(self, "VPC", max_azs=3)
    cluster = _ecs.Cluster(self, "ECSCluster", vpc=vpc)

    # Queue-driven worker that validates staged files; scales on queue
    # depth up to 2 tasks.
    validation_fargate_service = _ecs_patterns.QueueProcessingFargateService(
        self,
        "ValidationFargateService",
        cluster=cluster,
        cpu=4096,
        memory_limit_mib=30720,
        enable_logging=True,
        image=_ecs.ContainerImage.from_docker_image_asset(
            validation_fargate_asset),
        environment={
            "TABLE_NAME": validation_job_table.table_name,
            "QUEUE_URL": validation_job_queue.queue_url,
            "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
            "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
            "REGION": self.region
        },
        queue=validation_job_queue,
        max_scaling_capacity=2,
        max_healthy_percent=200,
        min_healthy_percent=66)
    # NOTE(review): full-access managed policies are broad; table/bucket
    # grants would be tighter.
    validation_fargate_service.task_definition.task_role.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonDynamoDBFullAccess"))
    validation_fargate_service.task_definition.task_role.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3FullAccess"))

    # Queue-driven worker that profiles staged files; same scaling model.
    profiling_fargate_service = _ecs_patterns.QueueProcessingFargateService(
        self,
        "ProfilingFargateService",
        cluster=cluster,
        cpu=4096,
        memory_limit_mib=30720,
        enable_logging=True,
        image=_ecs.ContainerImage.from_docker_image_asset(
            profiling_fargate_asset),
        environment={
            "TABLE_NAME": validation_job_table.table_name,
            "QUEUE_URL": profiling_job_queue.queue_url,
            "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
            "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
            "REGION": self.region
        },
        queue=profiling_job_queue,
        max_scaling_capacity=2,
        max_healthy_percent=200,
        min_healthy_percent=66)
    profiling_fargate_service.task_definition.task_role.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonDynamoDBFullAccess"))
    profiling_fargate_service.task_definition.task_role.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3FullAccess"))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Serverless TODO backend stack.

    Provisions: a Cognito user pool + app client, a single-table DynamoDB
    store with two GSIs, a REST API (optionally on a custom domain when
    ``acm_arn``/``domain_name``/``hosted_zone`` context values are set),
    and one Lambda per operation (get/create/update/delete/search/login).
    """
    super().__init__(scope, construct_id, **kwargs)

    # -----------------------------------
    # Cognito User Pool
    # -----------------------------------
    userpool = cognito.UserPool(
        self,
        "ServerlessTodoUserPool",
        user_pool_name="ServerlessTodoUserPool",
        sign_in_aliases=cognito.SignInAliases(username=True, email=True),
        password_policy=cognito.PasswordPolicy(
            min_length=6,
            require_digits=True,
            require_lowercase=True,
            require_symbols=True,
            require_uppercase=True,
            temp_password_validity=core.Duration.days(7)),
        auto_verify=cognito.AutoVerifiedAttrs(email=True),
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(mutable=True, required=True),
            family_name=cognito.StandardAttribute(mutable=True, required=True),
            given_name=cognito.StandardAttribute(mutable=True, required=True)))

    # ADMIN_USER_PASSWORD_AUTH is required by the login Lambda below,
    # which calls cognito-idp:AdminInitiateAuth.
    user_pool_client = userpool.add_client(
        "UserPoolClient",
        auth_flows=cognito.AuthFlow(admin_user_password=True))

    # -----------------------------------
    # dynamodb
    # -----------------------------------
    # Single-table design: PK=id, SK=meta. The two GSIs support lookups
    # keyed by meta and by owner respectively.
    dynamodb_table = dynamodb.Table(
        self,
        "TaskTable",
        partition_key=dynamodb.Attribute(name="id",
                                         type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="meta",
                                    type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        point_in_time_recovery=True,
        server_side_encryption=True)
    dynamodb_table.add_global_secondary_index(
        partition_key=dynamodb.Attribute(name="meta",
                                         type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="id",
                                    type=dynamodb.AttributeType.STRING),
        index_name="meta-id-index")
    dynamodb_table.add_global_secondary_index(
        partition_key=dynamodb.Attribute(name="owner",
                                         type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="meta",
                                    type=dynamodb.AttributeType.STRING),
        index_name="owner-meta-index")

    # -----------------------------------
    # apigateway
    # -----------------------------------
    acm_arn = self.node.try_get_context('acm_arn')
    domain_name = self.node.try_get_context("domain_name")
    hosted_zone = self.node.try_get_context("hosted_zone")

    # BUG FIX: the original passed
    #   iam.PolicyDocument(statements=iam.PolicyStatement(...).add_resources(...))
    # but PolicyStatement.add_resources() returns None and `statements`
    # expects a list of statements, so the API resource policy was broken.
    api_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[
                "arn:aws:lambda:{}:{}:function:*".format(
                    self.region, self.account)
            ])
    ])

    # Shared by both RestApi branches below (was duplicated verbatim).
    cors_options = apigw.CorsOptions(
        allow_origins=apigw.Cors.ALL_ORIGINS,  # TODO: Temporary for development
        allow_headers=[
            "Content-Type", "X-Amz-Date", "Authorization", "X-Api-Key",
            "X-Amz-Security-Token", "X-Tracing-Id",
            "x-jeffy-correlation-id", "x-amzn-trace-id"
        ],
        allow_methods=apigw.Cors.ALL_METHODS,
        allow_credentials=True)

    if acm_arn and domain_name and hosted_zone:
        # Custom domain: attach the ACM certificate and point an A-alias
        # record at the regional API Gateway domain.
        api = apigw.RestApi(
            self,
            'API',
            domain_name=apigw.DomainNameOptions(
                certificate=acm.Certificate.from_certificate_arn(
                    self, 'ApiCertificate', acm_arn),
                domain_name=domain_name,
                endpoint_type=apigw.EndpointType.REGIONAL),
            deploy_options=apigw.StageOptions(metrics_enabled=True),
            policy=api_policy,
            rest_api_name="Serverless TODO API",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            default_cors_preflight_options=cors_options)
        route53.CfnRecordSet(
            self,
            "apiDomainRecord",
            name=domain_name,
            type="A",
            alias_target={
                "dnsName": api.domain_name.domain_name_alias_domain_name,
                "hostedZoneId":
                api.domain_name.domain_name_alias_hosted_zone_id
            },
            hosted_zone_id=hosted_zone,
        )
    else:
        api = apigw.RestApi(
            self,
            'API',
            deploy_options=apigw.StageOptions(metrics_enabled=True),
            policy=api_policy,
            rest_api_name="Serverless TODO API",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            default_cors_preflight_options=cors_options)

    cognito_authorizer = apigw.CognitoUserPoolsAuthorizer(
        self,
        "CognitoAuthorizer",
        cognito_user_pools=[userpool],
        authorizer_name='todo_cognito_authorizer',
        identity_source='method.request.header.Authorization',
        results_cache_ttl=core.Duration.minutes(60))

    # Role assumed by API Gateway to invoke the backend Lambdas.
    api_role = iam.Role(self,
                        "ApiRole",
                        assumed_by=iam.ServicePrincipal(
                            service="apigateway.amazonaws.com"))
    api_role.add_to_policy(
        iam.PolicyStatement(actions=["lambda:InvokeFunction"],
                            resources=[
                                "arn:aws:lambda:{}:{}:function:*".format(
                                    self.region, self.account)
                            ]))

    # -----------------------------------
    # lambda common configure
    # -----------------------------------
    env = {
        "TABLE_NAME": dynamodb_table.table_name,
        "USER_POOL_ID": userpool.user_pool_id,
        "USER_POOL_NAME": userpool.user_pool_provider_name,
        "CLIENT_ID": user_pool_client.user_pool_client_id
    }

    def _task_function(resource_base_name: str,
                       handler: str) -> lambda_.Function:
        """Build one task-CRUD Lambda from function/src/task, grant it
        full access to the task table, and attach a two-week log group.

        Construct IDs and per-function creation order match the original
        inline definitions, so synthesized logical IDs are unchanged.
        """
        func = lambda_.Function(
            self,
            resource_base_name,
            code=lambda_.Code.from_asset(
                'function/src/task',
                bundling=core.BundlingOptions(
                    image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    command=[
                        'bash', '-c',
                        'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                    ],
                )),
            handler=handler,
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment=env,
            tracing=lambda_.Tracing.ACTIVE,
            timeout=core.Duration.seconds(29),
            memory_size=512)
        # dynamodb:* on the table and its indexes ('/*' covers the GSIs).
        func.add_to_role_policy(statement=iam.PolicyStatement(
            actions=['dynamodb:*'],
            resources=[
                dynamodb_table.table_arn, dynamodb_table.table_arn + '/*'
            ]))
        logs.LogGroup(self,
                      resource_base_name + 'LogGroup',
                      log_group_name='/aws/lambda/' + func.function_name,
                      retention=logs.RetentionDays.TWO_WEEKS)
        return func

    # -----------------------------------
    # get handler: GET /task/{task_id}
    # -----------------------------------
    get_task_func = _task_function("getTaskFunction", "get.lambda_handler")
    task_path = api.root.add_resource("task")
    task_id_path = task_path.add_resource("{task_id}")
    task_id_path.add_method(
        "GET",
        integration=apigw.LambdaIntegration(get_task_func,
                                            credentials_role=api_role),
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # create handler: POST /task
    # -----------------------------------
    create_task_func = _task_function("createTaskFunction",
                                      "create.lambda_handler")
    task_path.add_method(
        "POST",
        integration=apigw.LambdaIntegration(create_task_func,
                                            credentials_role=api_role),
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # update handler: POST /task/{task_id}
    # (PUT would be the conventional verb; kept as POST so existing
    # clients keep working.)
    # -----------------------------------
    update_task_func = _task_function("updateTaskFunction",
                                      "update.lambda_handler")
    task_id_path.add_method(
        "POST",
        integration=apigw.LambdaIntegration(update_task_func,
                                            credentials_role=api_role),
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # delete handler: DELETE /task/{task_id}
    # -----------------------------------
    delete_task_func = _task_function("deleteTaskFunction",
                                      "delete.lambda_handler")
    task_id_path.add_method(
        "DELETE",
        integration=apigw.LambdaIntegration(delete_task_func,
                                            credentials_role=api_role),
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # search handler: GET /tasks
    # -----------------------------------
    search_task_func = _task_function("searchTaskFunction",
                                      "search.lambda_handler")
    tasks_path = api.root.add_resource("tasks")
    tasks_path.add_method(
        "GET",
        integration=apigw.LambdaIntegration(search_task_func,
                                            credentials_role=api_role),
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # login handler: POST /auth/login (unauthenticated — it issues tokens)
    # -----------------------------------
    login_resource_base_name = "loginFunction"
    login_task_func = lambda_.Function(
        self,
        login_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/user',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="login.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    # Only needs AdminInitiateAuth on this pool (matches the app client's
    # admin_user_password auth flow above).
    login_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['cognito-idp:AdminInitiateAuth'],
        resources=[userpool.user_pool_arn]))
    logs.LogGroup(self,
                  login_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  login_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    auth_path = api.root.add_resource("auth")
    auth_login_path = auth_path.add_resource("login")
    auth_login_path.add_method(
        "POST", integration=apigw.LambdaIntegration(login_task_func))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Todo-list backend stack.

    Provisions a DynamoDB table, a self-sign-up Cognito user pool, Lambda
    functions for auth (signup/confirm/auth) and todo CRUD, and a
    CORS-enabled REST API routing to them.
    """
    super().__init__(scope, construct_id, **kwargs)

    TODOLIST_TABLE_NAME = "todolist"
    TODOLIST_TABLE_PARTITION_KEY = "userId"
    TODOLIST_TABLE_SORT_KEY = "todoId"

    # Table (and its data) are destroyed with the stack — dev setting.
    # FIX: use TODOLIST_TABLE_NAME instead of repeating the "todolist"
    # literal (the constant was previously defined but unused here).
    table = aws_dynamodb.Table(self,
                               "Todo",
                               table_name=TODOLIST_TABLE_NAME,
                               partition_key=aws_dynamodb.Attribute(
                                   name=TODOLIST_TABLE_PARTITION_KEY,
                                   type=aws_dynamodb.AttributeType.STRING),
                               sort_key=aws_dynamodb.Attribute(
                                   name=TODOLIST_TABLE_SORT_KEY,
                                   type=aws_dynamodb.AttributeType.STRING),
                               removal_policy=core.RemovalPolicy.DESTROY)

    # Pool backing the signup/confirm/auth Lambdas. Not passed to them via
    # environment here; presumably they locate it at runtime — verify.
    user_pool = aws_cognito.UserPool(
        self,
        "todo_list",
        self_sign_up_enabled=True,
        sign_in_aliases=aws_cognito.SignInAliases(email=True))

    # Code.from_asset replaces the deprecated Code.asset alias used
    # previously — same behavior, consistent with the CRUD functions below.
    sign_up_lambda_function = aws_lambda.Function(
        self,
        "signup",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="signup.main",
        code=aws_lambda.Code.from_asset("./lambda"))

    confirm_sign_up_lambda_function = aws_lambda.Function(
        self,
        "confirm",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="confirm.main",
        code=aws_lambda.Code.from_asset("./lambda"))

    auth_lambda_function = aws_lambda.Function(
        self,
        "auth",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="auth.main",
        code=aws_lambda.Code.from_asset("./lambda"))

    # NOTE(review): slash-style handlers ("lambda/create.main") are
    # unusual for Python Lambdas (dotted module paths are the documented
    # form) — confirm against the layout inside lambdas.zip.
    create_todo_list_lambda_function = aws_lambda.Function(
        self,
        "create",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda/create.main",
        environment=dict(TABLE_NAME=table.table_name),
        code=aws_lambda.Code.from_asset("./lambdas.zip"))

    get_all_todo_lambda_function = aws_lambda.Function(
        self,
        "getall",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda/get_all_todo.main",
        environment=dict(TABLE_NAME=table.table_name),
        code=aws_lambda.Code.from_asset("./lambdas.zip"))

    # Least-privilege table grants per function.
    table.grant_write_data(create_todo_list_lambda_function)
    table.grant_read_data(get_all_todo_lambda_function)

    api = aws_apigateway.RestApi(
        self,
        "todolistRestApi",
        rest_api_name="Todolist API",
        default_cors_preflight_options={
            "allow_origins": aws_apigateway.Cors.ALL_ORIGINS,
            "allow_methods": aws_apigateway.Cors.ALL_METHODS,
            "allow_headers": [
                "client-id", "Content-Type", "X-Amz-Date", "Authorization",
                "X-Api-Key", "X-Amz-Security-Token", "X-Amz-User-Agent"
            ]
        })

    # Public auth endpoints.
    sign_up_lambda_integration = aws_apigateway.LambdaIntegration(
        sign_up_lambda_function)
    api.root.add_resource("signup").add_method("POST",
                                               sign_up_lambda_integration)

    confirm_sign_up_lambda_integration = aws_apigateway.LambdaIntegration(
        confirm_sign_up_lambda_function)
    api.root.add_resource("confirm").add_method(
        "POST", confirm_sign_up_lambda_integration)

    auth_lambda_integration = aws_apigateway.LambdaIntegration(
        auth_lambda_function)
    api.root.add_resource("auth").add_method("POST",
                                             auth_lambda_integration)

    # Todo CRUD endpoints under /todolists.
    todolist_api = api.root.add_resource("todolists")
    create_todo_list_lambda_integration = aws_apigateway.LambdaIntegration(
        create_todo_list_lambda_function)
    todolist_api.add_method("POST", create_todo_list_lambda_integration)
    get_all_todo_lambda_integration = aws_apigateway.LambdaIntegration(
        get_all_todo_lambda_function)
    todolist_api.add_method("GET", get_all_todo_lambda_integration)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Image Content Search stack.

    Wires together: an S3 upload bucket, an SQS processing pipeline with a
    dead-letter queue, a Cognito-protected API Gateway, Lambdas for signed
    URLs / image massage / Rekognition analysis / data storage, a
    serverless Aurora MySQL cluster, and an EventBridge bus connecting the
    analyzer to the data function.
    """
    super().__init__(scope, id, **kwargs)

    # All tunables (queue retries, Cognito settings, DB scaling, ...) come
    # from a YAML config file shipped alongside the stack code.
    with open("stack/config.yml", 'r') as stream:
        configs = yaml.safe_load(stream)

    ### S3 core
    images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

    # Browsers POST uploads directly to S3 using the signed URL issued by
    # the getSignedUrl Lambda below.
    images_S3_bucket.add_cors_rule(
        allowed_methods=[_s3.HttpMethods.POST],
        allowed_origins=["*"]  # add API gateway web resource URL
    )

    ### SQS core
    # Messages that fail MaxReceiveCount deliveries are shunted here.
    image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
    image_queue = _sqs.Queue(
        self, "ICS_IMAGES_QUEUE",
        dead_letter_queue={
            "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
            "queue": image_deadletter_queue
        })

    ### api gateway core
    # Resource tree: /<ProjectName>/{web, signedUrl, search}
    api_gateway = RestApi(self, 'ICS_API_GATEWAY',
                          rest_api_name='ImageContentSearchApiGateway')
    api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
    api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
    api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
    api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

    ### landing page function
    # Serves the static landing page; unauthenticated GET.
    # NOTE(review): Code.asset is a deprecated alias of Code.from_asset in
    # CDK v1 — behavior identical, consider migrating.
    get_landing_page_function = Function(
        self, "ICS_GET_LANDING_PAGE",
        function_name="ICS_GET_LANDING_PAGE",
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/landingPage"))
    get_landing_page_integration = LambdaIntegration(
        get_landing_page_function, proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])
    api_gateway_landing_page_resource.add_method(
        'GET', get_landing_page_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }])

    ### cognito
    # Verified email is required for self sign-up.
    required_attribute = _cognito.StandardAttribute(required=True)
    users_pool = _cognito.UserPool(
        self, "ICS_USERS_POOL",
        auto_verify=_cognito.AutoVerifiedAttrs(email=True),  # required for self sign-up
        standard_attributes=_cognito.StandardAttributes(email=required_attribute),  # required for self sign-up
        self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

    # L1 client so implicit-grant OAuth settings can be set directly; the
    # landing page URL doubles as the OAuth callback.
    user_pool_app_client = _cognito.CfnUserPoolClient(
        self, "ICS_USERS_POOL_APP_CLIENT",
        supported_identity_providers=["COGNITO"],
        allowed_o_auth_flows=["implicit"],
        allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
        user_pool_id=users_pool.user_pool_id,
        callback_ur_ls=[api_gateway_landing_page_resource.url],
        allowed_o_auth_flows_user_pool_client=True,
        explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

    # Hosted-UI domain; prefix comes from config (used in the output URL
    # at the bottom of this method).
    user_pool_domain = _cognito.UserPoolDomain(
        self, "ICS_USERS_POOL_DOMAIN",
        user_pool=users_pool,
        cognito_domain=_cognito.CognitoDomainOptions(
            domain_prefix=configs["Cognito"]["DomainPrefix"]))

    ### get signed URL function
    get_signedurl_function = Function(
        self, "ICS_GET_SIGNED_URL",
        function_name="ICS_GET_SIGNED_URL",
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
        },
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/getSignedUrl"))
    get_signedurl_integration = LambdaIntegration(
        get_signedurl_function, proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    # L1 Cognito authorizer; attached to the method below via a raw
    # AuthorizerId property override (no L2 support path used here).
    api_gateway_get_signedurl_authorizer = CfnAuthorizer(
        self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])
    api_gateway_get_signedurl_resource.add_method(
        'GET', get_signedurl_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]
    ).node.find_child('Resource').add_property_override(
        'AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

    # Signed URLs may only PUT under the new/ prefix.
    images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

    ### image massage function
    # Moves fresh uploads from new/ to processed/ and enqueues them.
    # NOTE(review): the env var key mirrors the function name but holds the
    # queue name — confirm ./src/imageMassage reads "ICS_IMAGE_MASSAGE".
    image_massage_function = Function(
        self, "ICS_IMAGE_MASSAGE",
        function_name="ICS_IMAGE_MASSAGE",
        timeout=core.Duration.seconds(6),
        runtime=Runtime.PYTHON_3_7,
        environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
        handler="main.handler",
        code=Code.asset("./src/imageMassage"))
    images_S3_bucket.grant_write(image_massage_function, "processed/*")
    images_S3_bucket.grant_delete(image_massage_function, "new/*")
    images_S3_bucket.grant_read(image_massage_function, "new/*")

    # Trigger on every object created under new/.
    new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)
    images_S3_bucket.add_event_notification(
        _s3.EventType.OBJECT_CREATED,
        new_image_added_notification,
        _s3.NotificationKeyFilter(prefix="new/")
    )
    image_queue.grant_send_messages(image_massage_function)

    ### image analyzer function
    # Consumes the queue (via the SqsEventSource below) and calls
    # Rekognition on each image.
    image_analyzer_function = Function(
        self, "ICS_IMAGE_ANALYSIS",
        function_name="ICS_IMAGE_ANALYSIS",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(10),
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "REGION": core.Aws.REGION,
        },
        handler="main.handler",
        code=Code.asset("./src/imageAnalysis"))
    image_analyzer_function.add_event_source(
        _lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))

    # NOTE(review): this grants consume to the *massage* function, but the
    # consumer wired above is the *analyzer* (SqsEventSource adds its own
    # grant). Looks like a copy-paste slip — confirm before changing.
    image_queue.grant_consume_messages(image_massage_function)

    # Rekognition has no resource-level ARNs for these actions, hence "*".
    lambda_rekognition_access = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
        resources=["*"]
    )
    image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
    images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

    ### API gateway finalizing
    # add_cors_options is defined elsewhere on this class (not visible
    # here); presumably it adds OPTIONS/CORS mock methods per resource.
    self.add_cors_options(api_gateway_get_signedurl_resource)
    self.add_cors_options(api_gateway_landing_page_resource)
    self.add_cors_options(api_gateway_image_search_resource)

    ### database
    # Generated password; the username in the template is a placeholder
    # replaced at deploy time by Secrets Manager templating.
    database_secret = _secrets_manager.Secret(
        self, "ICS_DATABASE_SECRET",
        secret_name="rds-db-credentials/image-content-search-rds-secret",
        generate_secret_string=_secrets_manager.SecretStringGenerator(
            generate_string_key='password',
            secret_string_template='{"username": "******"}',
            exclude_punctuation=True,
            exclude_characters='/@\" \\\'',
            require_each_included_type=True
        )
    )

    # L1 cluster so engine_mode="serverless" and the Data API
    # (enable_http_endpoint) can be set; the L2 engine object is only used
    # to derive the engine type string.
    database = _rds.CfnDBCluster(
        self, "ICS_DATABASE",
        engine=_rds.DatabaseClusterEngine.aurora_mysql(
            version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
        engine_mode="serverless",
        database_name=configs["Database"]["Name"],
        enable_http_endpoint=True,
        deletion_protection=configs["Database"]["DeletionProtection"],
        master_username=database_secret.secret_value_from_json("username").to_string(),
        master_user_password=database_secret.secret_value_from_json("password").to_string(),
        scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=configs["Database"]["Scaling"]["AutoPause"],
            min_capacity=configs["Database"]["Scaling"]["Min"],
            max_capacity=configs["Database"]["Scaling"]["Max"],
            seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
        ),
    )

    # CfnDBCluster exposes no ARN attribute; build it by hand.
    database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(
        core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)

    # Attach the secret to the cluster (rotation/host metadata); must be
    # created after the cluster, hence the explicit dependency.
    secret_target = _secrets_manager.CfnSecretTargetAttachment(
        self, "ICS_DATABASE_SECRET_TARGET",
        target_type="AWS::RDS::DBCluster",
        target_id=database.ref,
        secret_id=database_secret.secret_arn
    )
    secret_target.node.add_dependency(database)

    ### database function
    image_data_function_role = _iam.Role(
        self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
        role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
        assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
        ]
    )

    # Talks to Aurora via the Data API (CLUSTER_ARN + CREDENTIALS_ARN);
    # also serves search requests and the schema-creation custom resource.
    image_data_function = Function(
        self, "ICS_IMAGE_DATA",
        function_name="ICS_IMAGE_DATA",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(5),
        role=image_data_function_role,
        environment={
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "CLUSTER_ARN": database_cluster_arn,
            "CREDENTIALS_ARN": database_secret.secret_arn,
            "DB_NAME": database.database_name,
            "REGION": core.Aws.REGION
        },
        handler="main.handler",
        code=Code.asset("./src/imageData")
    )
    image_search_integration = LambdaIntegration(
        image_data_function, proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    # Same L1 authorizer + property-override pattern as the signedUrl
    # method above.
    api_gateway_image_search_authorizer = CfnAuthorizer(
        self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])
    api_gateway_image_search_resource.add_method(
        'POST', image_search_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]
    ).node.find_child('Resource').add_property_override(
        'AuthorizerId', api_gateway_image_search_authorizer.ref)

    # Amazon Translate has no resource-level permissions; "*" required.
    lambda_access_search = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["translate:TranslateText"],
        resources=["*"]
    )
    image_data_function.add_to_role_policy(lambda_access_search)

    ### custom resource
    # Runs the image-data function once at deploy time to create the DB
    # schema (resource_type Custom::SchemaCreation).
    lambda_provider = Provider(
        self, 'ICS_IMAGE_DATA_PROVIDER',
        on_event_handler=image_data_function
    )
    core.CustomResource(
        self, 'ICS_IMAGE_DATA_RESOURCE',
        service_token=lambda_provider.service_token,
        pascal_case_properties=False,
        resource_type="Custom::SchemaCreation",
        properties={
            "source": "Cloudformation"
        }
    )

    ### event bridge
    # Analyzer publishes results to this bus; a rule matching the
    # analyzer's ARN in the event `resources` forwards them to the data
    # function.
    event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")
    event_rule = _events.Rule(
        self, "ICS_IMAGE_CONTENT_RULE",
        rule_name="ICS_IMAGE_CONTENT_RULE",
        description="The event from image analyzer to store the data",
        event_bus=event_bus,
        event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
    )
    event_rule.add_target(_event_targets.LambdaFunction(image_data_function))
    event_bus.grant_put_events(image_analyzer_function)
    image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

    ### outputs
    # Convenience link to the Cognito Hosted UI implicit-grant login page,
    # redirecting back to the landing page resource.
    core.CfnOutput(
        self, 'CognitoHostedUILogin',
        value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(
            user_pool_domain.domain_name,
            core.Aws.REGION,
            user_pool_app_client.ref,
            '+'.join(user_pool_app_client.allowed_o_auth_scopes),
            api_gateway_landing_page_resource.url),
        description='The Cognito Hosted UI Login Page'
    )
def __init__(self, scope: core.Construct, id: str, instance_id: str,
             contact_flow_id: str, source_phone_number: str, timeout: int,
             **kwargs):
    """Provision the AWS callouts stack.

    Creates: a static website (S3 + CloudFront), call-task SQS queues with
    DLQs, the Lambda functions that drive an Amazon Connect outbound-call
    workflow, a Step Functions state machine orchestrating the calls, an
    AppSync GraphQL API over the call-record table, and a Cognito user /
    identity pool for the web frontend.

    :param scope: parent construct.
    :param id: construct id.
    :param instance_id: Amazon Connect instance id used to build ARNs.
    :param contact_flow_id: Connect contact flow invoked for each call.
    :param source_phone_number: caller id for outbound calls.
    :param timeout: per-call wait timeout (seconds) in the state machine.
    """
    super().__init__(scope, id, **kwargs)

    # --- Static web frontend: S3 website bucket behind CloudFront ---
    web_bucket = _s3.Bucket(self, "StaticWebBucket",
                            website_index_document="index.html",
                            website_error_document="index.html",
                            removal_policy=core.RemovalPolicy.DESTROY,
                            public_read_access=True)
    core.CfnOutput(self, 'WebBucketUrl', value=web_bucket.bucket_domain_name)
    web_distribution = _clf.CloudFrontWebDistribution(
        self, 'StaticWebDistribution',
        origin_configs=[
            _clf.SourceConfiguration(
                s3_origin_source=_clf.S3OriginConfig(
                    s3_bucket_source=web_bucket),
                behaviors=[_clf.Behavior(is_default_behavior=True)])
        ],
        viewer_protocol_policy=_clf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)
    # Deploy the pre-built site and invalidate the distribution on change.
    _s3d.BucketDeployment(
        self, "S3StaticWebContentDeploymentWithInvalidation",
        sources=[
            _s3d.Source.asset(
                f"{pathlib.Path(__file__).parent.absolute()}/site-content/build"
            )
        ],
        destination_bucket=web_bucket,
        distribution=web_distribution,
        distribution_paths=["/*"])

    # --- Buckets / queues / topic used by the call workflow ---
    file_bucket = _s3.Bucket(self, "FileBucket",
                             removal_policy=core.RemovalPolicy.DESTROY)
    call_dead_letter_queue = _sqs.Queue(self, "CallDeadLetterQueue",
                                        fifo=True,
                                        content_based_deduplication=True)
    call_sqs_queue = _sqs.Queue(
        self, "CallSqsQueue",
        fifo=True,
        content_based_deduplication=True,
        visibility_timeout=core.Duration.seconds(120),
        dead_letter_queue=_sqs.DeadLetterQueue(
            max_receive_count=1, queue=call_dead_letter_queue))
    async_call_dead_letter_queue = _sqs.Queue(
        self, "AsyncCallDeadLetterQueue",
        fifo=True,
        content_based_deduplication=True)
    async_callout_queue = _sqs.Queue(
        self, "AsyncCalloutQueue",
        fifo=True,
        content_based_deduplication=True,
        visibility_timeout=core.Duration.seconds(120),
        dead_letter_queue=_sqs.DeadLetterQueue(
            max_receive_count=1, queue=async_call_dead_letter_queue))
    call_job_complete_sns_topic = _sns.Topic(
        self, "CallJobCompleteSnsTopic", display_name="CallJobCompletion")

    # --- DynamoDB tables: per-receiver results and per-task records ---
    call_result_table = _dynamodb.Table(
        self,
        "CallResultDynamodbTable",
        billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
        partition_key=_dynamodb.Attribute(
            name="task_id", type=_dynamodb.AttributeType.STRING),
        sort_key=_dynamodb.Attribute(name="receiver_id",
                                     type=_dynamodb.AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY)
    callout_record_table = _dynamodb.Table(
        self,
        "CallTaskDynamodbTable",
        billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
        partition_key=_dynamodb.Attribute(
            name="task_id", type=_dynamodb.AttributeType.STRING),
        sort_key=_dynamodb.Attribute(name="created_at",
                                     type=_dynamodb.AttributeType.NUMBER),
        removal_policy=core.RemovalPolicy.DESTROY)
    # GSI lets AppSync list latest TASK / REPORT records by creation time.
    callout_record_table.add_global_secondary_index(
        partition_key=_dynamodb.Attribute(
            name='call_type', type=_dynamodb.AttributeType.STRING),
        sort_key=_dynamodb.Attribute(name='created_at',
                                     type=_dynamodb.AttributeType.NUMBER),
        index_name='CallTypeCreatedAtGlobalIndex',
        projection_type=_dynamodb.ProjectionType.ALL)

    # --- Shared Lambda layers and common function arguments ---
    python_function_layer = _lambda.LayerVersion(
        self, "LambdaPythonFunctionLayer",
        code=_lambda.Code.asset("aws_callouts_cdk/layer/_python"),
        compatible_runtimes=[
            _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
        ],
        license="Available under the MIT-0 license")
    nodejs_function_layer = _lambda.LayerVersion(
        self, "LambdaNodeJsFunctionLayer",
        code=_lambda.Code.asset("aws_callouts_cdk/layer/_nodejs"),
        compatible_runtimes=[
            _lambda.Runtime.NODEJS_10_X, _lambda.Runtime.NODEJS_12_X
        ],
        license="Available under the MIT-0 license")
    global_python_function_arguments = {
        "code": _lambda.Code.asset("aws_callouts_cdk/src/python"),
        "layers": [python_function_layer],
        "runtime": _lambda.Runtime.PYTHON_3_7
    }
    global_nodejs_function_arguments = {
        "code": _lambda.Code.asset("aws_callouts_cdk/src/nodejs"),
        "layers": [nodejs_function_layer],
        "runtime": _lambda.Runtime.NODEJS_12_X
    }

    # --- Lambda functions of the call workflow ---
    get_callout_job_function = _lambda.Function(
        self, "GetCalloutJobFunction",
        handler="get_call_job.lambda_handler",
        **global_python_function_arguments)
    get_callout_job_function.add_environment(key="S3Bucket",
                                             value=file_bucket.bucket_name)
    file_bucket.grant_read(get_callout_job_function)

    callout_function = _lambda.Function(self, "CalloutFunction",
                                        handler="send_call.lambda_handler",
                                        **global_python_function_arguments)
    callout_function.add_environment(
        key="ContactFlowArn",
        value=
        f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/contact-flow/{contact_flow_id}"
    )
    callout_function.add_environment(key="SourcePhoneNumber",
                                     value=source_phone_number)
    callout_function.add_environment(key="ExcelFileBucket",
                                     value=file_bucket.bucket_name)
    # NOTE: env key "AsynCalloutQueueUrl" is misspelled but is read by the
    # Lambda source under this exact name — do not rename here alone.
    callout_function.add_environment(key="AsynCalloutQueueUrl",
                                     value=async_callout_queue.queue_url)
    callout_function.add_to_role_policy(statement=_iam.PolicyStatement(
        resources=[
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/*"
        ],
        actions=["connect:StartOutboundVoiceContact"]))
    callout_function.add_event_source(source=_les.SqsEventSource(
        queue=async_callout_queue, batch_size=1))
    file_bucket.grant_read_write(callout_function)

    response_handler_function = _lambda.Function(
        self, "ResponseHandlerFunction",
        handler="response_handler.lambda_handler",
        **global_python_function_arguments)
    response_handler_function.add_permission(
        id="ResponseHandlerFunctionLambdaInvokePermission",
        principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
        action="lambda:InvokeFunction",
        source_account=self.account,
        source_arn=
        f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
    )

    send_task_success_function = _lambda.Function(
        self, "SendTaskSuccessFunction",
        handler="send_task_success.lambda_handler",
        **global_python_function_arguments)
    send_task_success_function.add_permission(
        id="SendTaskSuccessFunctionLambdaInvokePermission",
        principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
        action="lambda:InvokeFunction",
        source_account=self.account,
        source_arn=
        f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
    )

    get_call_result_function = _lambda.Function(
        self, "GetCallResultFunction",
        handler="get_call_result.lambda_handler",
        memory_size=512,
        **global_python_function_arguments)
    get_call_result_function.add_environment(
        key="CallResultDynamoDBTable", value=call_result_table.table_name)
    get_call_result_function.add_environment(key="S3Bucket",
                                             value=file_bucket.bucket_name)
    call_result_table.grant_read_data(grantee=get_call_result_function)
    file_bucket.grant_read_write(get_call_result_function)

    iterator_function = _lambda.Function(
        self, "IteratorFunction",
        handler="iterator.lambda_handler",
        **global_python_function_arguments)
    iterator_function.add_permission(
        id="IteratorFunctionLambdaInvokePermission",
        principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
        action="lambda:InvokeFunction",
        source_account=self.account,
        source_arn=
        f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
    )

    create_appsync_call_task_function = _lambda.Function(
        self, "CreateAppSyncCallTaskFunction",
        handler="create_appsync_call_task.lambda_handler",
        **global_nodejs_function_arguments)
    create_appsync_call_task_function.add_environment(
        key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
    create_appsync_call_task_function.add_environment(
        key="CallRecordTableName", value=callout_record_table.table_name)
    call_sqs_queue.grant_send_messages(create_appsync_call_task_function)
    callout_record_table.grant_write_data(create_appsync_call_task_function)

    create_call_report_record_function = _lambda.Function(
        self, "CreateCallReportRecordFunction",
        handler="create_call_report_record.lambda_handler",
        **global_nodejs_function_arguments)

    create_excel_call_task_function = _lambda.Function(
        self, "CreateExcelCallTaskFunction",
        handler="create_excel_call_task.lambda_handler",
        **global_python_function_arguments)
    create_excel_call_task_function.add_environment(
        key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
    call_sqs_queue.grant_send_messages(create_excel_call_task_function)
    # Excel uploads under call_task/*.xlsx trigger task creation.
    create_excel_call_task_function.add_event_source(
        source=_les.S3EventSource(bucket=file_bucket,
                                  events=[_s3.EventType.OBJECT_CREATED],
                                  filters=[
                                      _s3.NotificationKeyFilter(
                                          prefix="call_task",
                                          suffix=".xlsx")
                                  ]))

    start_callout_flow_function = _lambda.Function(
        self, "StartCalloutFlowFunction",
        handler="start_call_out_flow.lambda_handler",
        reserved_concurrent_executions=1,
        **global_python_function_arguments)
    start_callout_flow_function.add_environment(
        key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
    start_callout_flow_function.add_environment(
        key="ResponseHandlerFunctionArn",
        value=response_handler_function.function_arn)
    start_callout_flow_function.add_environment(
        key="IteratorFunctionArn", value=iterator_function.function_arn)
    start_callout_flow_function.add_environment(
        key="SendTaskSuccessFunctionArn",
        value=send_task_success_function.function_arn)
    start_callout_flow_function.add_environment(
        key="S3Bucket", value=file_bucket.bucket_name)
    start_callout_flow_function.add_event_source(
        source=_les.SqsEventSource(queue=call_sqs_queue, batch_size=1))
    file_bucket.grant_read_write(start_callout_flow_function)

    # --- Step Functions state machine (Amazon States Language) ---
    # Map state fans out over the call tasks; each call waits for a task
    # token ("waitForTaskToken") until the callee answers or times out.
    call_state_machine_definition = {
        "Comment":
        "Reading messages from an SQS queue and iteratively processing each message.",
        "StartAt": "Start",
        "States": {
            "Start": {
                "Type": "Pass",
                "Next": "Process Call Messages"
            },
            "Process Call Messages": {
                "Type": "Map",
                "Next": "Get Call Result",
                "InputPath": "$",
                "ItemsPath": "$",
                "OutputPath": "$.[0]",
                "Iterator": {
                    "StartAt": "Get Call out job",
                    "States": {
                        "Get Call out job": {
                            "Type": "Task",
                            "Resource": get_callout_job_function.function_arn,
                            "Next": "Callout with AWS Connect"
                        },
                        "Callout with AWS Connect": {
                            "Type": "Task",
                            "Resource":
                            "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
                            "TimeoutSeconds": timeout,
                            "Parameters": {
                                "QueueUrl": async_callout_queue.queue_url,
                                "MessageGroupId": "1",
                                "MessageBody": {
                                    "Message.$": "$",
                                    "TaskToken.$": "$$.Task.Token"
                                }
                            },
                            "Catch": [{
                                "ErrorEquals": ["States.Timeout"],
                                "ResultPath": None,
                                "Next": "Call Timeout"
                            }],
                            "Next": "Save call result"
                        },
                        "Call Timeout": {
                            "Type": "Pass",
                            "ResultPath": None,
                            "Next": "Save call result"
                        },
                        "Save call result": {
                            "Type": "Task",
                            "Resource": "arn:aws:states:::dynamodb:putItem",
                            "Parameters": {
                                "TableName": call_result_table.table_name,
                                "Item": {
                                    "receiver_id": {
                                        "S.$": "$.receiver_id"
                                    },
                                    "task_id": {
                                        "S.$": "$.task_id"
                                    },
                                    "username": {
                                        "S.$": "$.username"
                                    },
                                    "phone_number": {
                                        "S.$": "$.phone_number"
                                    },
                                    "status": {
                                        "S.$": "$.status"
                                    },
                                    "answers": {
                                        "S.$": "$.answers"
                                    },
                                    "error": {
                                        "S.$": "$.error"
                                    },
                                    "call_at": {
                                        "S.$": "$.call_at"
                                    }
                                }
                            },
                            "ResultPath": "$.Result",
                            "OutputPath": "$.task_id",
                            "End": True
                        }
                    }
                }
            },
            "Get Call Result": {
                "Type": "Task",
                "Resource": get_call_result_function.function_arn,
                "Next": "Create Call Report Record"
            },
            "Create Call Report Record": {
                "Type": "Task",
                "Resource": create_call_report_record_function.function_arn,
                "Next": "Send Completion message to SNS"
            },
            "Send Completion message to SNS": {
                "Type": "Task",
                "Resource": "arn:aws:states:::sns:publish",
                "Parameters": {
                    "TopicArn": call_job_complete_sns_topic.topic_arn,
                    "Message.$": "$"
                },
                "Next": "Finish"
            },
            "Finish": {
                "Type": "Succeed"
            }
        }
    }
    callout_state_machine_role = _iam.Role(
        self, "CalloutStatesExecutionRole",
        assumed_by=_iam.ServicePrincipal(
            f"states.{self.region}.amazonaws.com"))
    callout_state_machine_role.add_to_policy(
        _iam.PolicyStatement(
            actions=[
                "sqs:SendMessage", "dynamodb:PutItem",
                "lambda:InvokeFunction", "SNS:Publish"
            ],
            resources=[
                async_callout_queue.queue_arn, call_result_table.table_arn,
                get_callout_job_function.function_arn,
                get_call_result_function.function_arn,
                call_job_complete_sns_topic.topic_arn,
                create_appsync_call_task_function.function_arn,
                create_call_report_record_function.function_arn
            ]))
    callout_state_machine = _sfn.CfnStateMachine(
        self, "CalloutStateMachine",
        role_arn=callout_state_machine_role.role_arn,
        definition_string=json.dumps(call_state_machine_definition))
    send_task_success_function.add_to_role_policy(
        _iam.PolicyStatement(actions=["states:SendTaskSuccess"],
                             resources=[callout_state_machine.ref]))
    start_callout_flow_function.add_environment(
        key="CalloutStateMachineArn", value=callout_state_machine.ref)
    start_callout_flow_function.add_to_role_policy(
        _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                             resources=[callout_state_machine.ref],
                             actions=['states:StartExecution']))

    # --- Cognito user pool + AppSync GraphQL API ---
    user_pool = _cognito.UserPool(
        self, "UserPool", sign_in_type=_cognito.SignInType.USERNAME)
    user_pool_client = _cognito.UserPoolClient(self, "UserPoolClient",
                                               user_pool=user_pool)
    appsync_api = _appsync.GraphQLApi(
        self, "AppSyncApi",
        name="AWSCalloutApi",
        user_pool_config=_appsync.UserPoolConfig(
            user_pool=user_pool,
            default_action=_appsync.UserPoolDefaultAction.ALLOW),
        log_config=_appsync.LogConfig(
            field_log_level=_appsync.FieldLogLevel.ALL),
        schema_definition_file=
        f"{pathlib.Path(__file__).parent.absolute()}/schema.graphql")
    callout_record_ddb_ds = appsync_api.add_dynamo_db_data_source(
        name="CalloutRecordDdb",
        description="Callout Record DynamoDB Data Source",
        table=callout_record_table)
    callout_record_ddb_ds.create_resolver(
        type_name="Query",
        field_name="getLatestCallTaskRecords",
        request_mapping_template=_appsync.MappingTemplate.from_string(
            '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"TASK"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
        ),
        response_mapping_template=_appsync.MappingTemplate.
        dynamo_db_result_list())
    callout_record_ddb_ds.create_resolver(
        type_name="Query",
        field_name="getLatestCallReportRecords",
        request_mapping_template=_appsync.MappingTemplate.from_string(
            '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"REPORT"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
        ),
        response_mapping_template=_appsync.MappingTemplate.
        dynamo_db_result_list())
    callout_record_ddb_ds.create_resolver(
        type_name="Mutation",
        field_name="createCallReport",
        request_mapping_template=_appsync.MappingTemplate.from_string(
            '{"version":"2017-02-28","operation":"PutItem","key":{"task_id":{"S":"${ctx.args.report.task_id}"},"created_at":{"N":"${ctx.args.report.created_at}"}},"attributeValues":$util.dynamodb.toMapValuesJson($ctx.args.report)}'
        ),
        response_mapping_template=_appsync.MappingTemplate.
        dynamo_db_result_item())
    call_task_lambda_ds = appsync_api.add_lambda_data_source(
        name="CallTaskLambda",
        description="Call Task Lambda Data Source",
        lambda_function=create_appsync_call_task_function)
    call_task_lambda_ds.create_resolver(
        type_name="Mutation",
        field_name="createCallTask",
        request_mapping_template=_appsync.MappingTemplate.lambda_request(
            "$utils.toJson($ctx.args)"),
        response_mapping_template=_appsync.MappingTemplate.lambda_result())
    create_call_report_record_function.add_environment(
        value=appsync_api.graph_ql_url, key="AppSyncGraphQlApiUrl")
    create_call_report_record_function.add_to_role_policy(
        statement=_iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=['appsync:GraphQL'],
            resources=[
                f"{appsync_api.arn}/types/Mutation/fields/createCallReport"
            ]))

    # --- Stack outputs ---
    core.CfnOutput(self, id="OutputCallSqsQueue",
                   value=call_sqs_queue.queue_arn)
    core.CfnOutput(self, id="OutputCallJobCompletionSNSTopic",
                   value=call_job_complete_sns_topic.topic_arn)
    core.CfnOutput(self, id="OutputExcelFileS3Bucket",
                   value=file_bucket.bucket_name)
    core.CfnOutput(self, id="OutputStaticWebS3Bucket",
                   value=web_bucket.bucket_name)
    core.CfnOutput(self, id="OutputStaticWebUrl",
                   value=web_bucket.bucket_website_url)

    # --- Cognito identity pool: unauthenticated web access to AppSync ---
    identity_pool = _cognito.CfnIdentityPool(
        self, "IdentityPool",
        allow_unauthenticated_identities=True,
        cognito_identity_providers=[
            _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                provider_name=user_pool.user_pool_provider_name,
                client_id=user_pool_client.user_pool_client_id)
        ])
    identity_pool_unauthorized_role = _iam.Role(
        self, 'IdentityPoolUnAuthorizedRole',
        assumed_by=_iam.FederatedPrincipal(
            federated="cognito-identity.amazonaws.com",
            assume_role_action="sts:AssumeRoleWithWebIdentity",
            conditions={
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "unauthenticated"
                }
            }))
    # Grants access to every GraphQL type; tighten to per-field ARNs if
    # unauthenticated access should be read-mostly.
    identity_pool_unauthorized_role.add_to_policy(
        _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                             actions=["appsync:GraphQL"],
                             resources=[f"{appsync_api.arn}/types/*"]))
    _cognito.CfnIdentityPoolRoleAttachment(
        self, "CognitoIdentityPoolRoleAttachment",
        identity_pool_id=identity_pool.ref,
        roles={
            "unauthenticated": identity_pool_unauthorized_role.role_arn
        })
    core.CfnOutput(self, id="UserPoolId", value=user_pool.user_pool_id)
    core.CfnOutput(self, id="UserPoolClientId",
                   value=user_pool_client.user_pool_client_id)
    core.CfnOutput(self, id="IdentityPoolId", value=identity_pool.ref)
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """Provision a REST API (API Gateway) proxying a DynamoDB comments table.

    Creates: a VPC with S3/DynamoDB gateway endpoints, a provisioned-capacity
    DynamoDB table with a ``pageId-index`` GSI, a Cognito user pool +
    authorizer, and two Cognito-protected API methods that integrate
    directly with DynamoDB (``PutItem`` on POST, ``Query`` on GET) via
    VTL mapping templates — no Lambda in the data path.
    """
    super().__init__(scope, construct_id, **kwargs)

    # To deploy into an existing VPC instead, comment out the Vpc(...)
    # below and use aws_ec2.Vpc.from_lookup(self, 'ExistingVPC', ...)
    # with `cdk -c vpc_name=your-existing-vpc synth`.
    vpc = aws_ec2.Vpc(self, "ApiGatewayDynamoDBVPC",
      max_azs=2,
      gateway_endpoints={
        "S3": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.S3
        ),
        "DynamoDB": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB
        )
      }
    )

    # Random suffix avoids name collisions when the stack is deployed
    # repeatedly (table names must be unique per region).
    DDB_TABLE_SUFFIX = ''.join(random.sample((string.ascii_lowercase + string.digits), k=7))
    DDB_TABLE_NAME = "Comments-{}".format(DDB_TABLE_SUFFIX)
    ddb_table = aws_dynamodb.Table(self, "DynamoDbTable",
      table_name=DDB_TABLE_NAME,
      removal_policy=cdk.RemovalPolicy.DESTROY,
      partition_key=aws_dynamodb.Attribute(name="commentId",
        type=aws_dynamodb.AttributeType.STRING),
      time_to_live_attribute="ttl",
      billing_mode=aws_dynamodb.BillingMode.PROVISIONED,
      read_capacity=15,
      write_capacity=5,
    )
    # GSI so GET /comments/{pageId} can query by page rather than scan.
    ddb_table.add_global_secondary_index(
      read_capacity=15,
      write_capacity=5,
      index_name="pageId-index",
      partition_key=aws_dynamodb.Attribute(name='pageId',
        type=aws_dynamodb.AttributeType.STRING),
      projection_type=aws_dynamodb.ProjectionType.ALL
    )

    user_pool = aws_cognito.UserPool(self, 'UserPool',
      user_pool_name='UserPoolForApiGateway',
      removal_policy=cdk.RemovalPolicy.DESTROY,
      self_sign_up_enabled=True,
      sign_in_aliases={'email': True},
      auto_verify={'email': True},
      password_policy={
        'min_length': 8,
        'require_lowercase': False,
        'require_digits': False,
        'require_uppercase': False,
        'require_symbols': False,
      },
      account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY
    )
    user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient',
      user_pool=user_pool,
      auth_flows={
        'admin_user_password': True,
        'user_password': True,
        'custom': True,
        'user_srp': True
      },
      supported_identity_providers=[aws_cognito.UserPoolClientIdentityProvider.COGNITO]
    )
    auth = aws_apigateway.CognitoUserPoolsAuthorizer(self, 'AuthorizerForDynamoDBApi',
      cognito_user_pools=[user_pool]
    )

    # Inline write policy for the API Gateway execution role; reads come
    # from the managed read-only policy below.
    ddb_access_policy_doc = aws_iam.PolicyDocument()
    ddb_access_policy_doc.add_statements(aws_iam.PolicyStatement(**{
      "effect": aws_iam.Effect.ALLOW,
      "resources": [ddb_table.table_arn],
      "actions": [
        "dynamodb:DeleteItem",
        "dynamodb:PartiQLInsert",
        "dynamodb:UpdateTimeToLive",
        "dynamodb:BatchWriteItem",
        "dynamodb:PutItem",
        "dynamodb:PartiQLUpdate",
        "dynamodb:UpdateItem",
        "dynamodb:PartiQLDelete"
      ]
    }))
    apigw_dynamodb_role = aws_iam.Role(self, "ApiGatewayRoleForDynamoDB",
      role_name='APIGatewayRoleForDynamoDB',
      assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
      inline_policies={
        'DynamoDBAccessPolicy': ddb_access_policy_doc
      },
      managed_policies=[
        aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBReadOnlyAccess'),
      ]
    )

    dynamodb_api = aws_apigateway.RestApi(self, "DynamoDBProxyAPI",
      rest_api_name="comments-api",
      description="An Amazon API Gateway REST API that integrated with an Amazon DynamoDB.",
      endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
      default_cors_preflight_options={
        "allow_origins": aws_apigateway.Cors.ALL_ORIGINS
      },
      deploy=True,
      deploy_options=aws_apigateway.StageOptions(stage_name="v1"),
      endpoint_export_name="DynamoDBProxyAPIEndpoint"
    )
    all_resources = dynamodb_api.root.add_resource("comments")
    one_resource = all_resources.add_resource("{pageId}")

    # Raw strings: "\d" in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning today, error in future Python). Runtime value unchanged.
    apigw_error_responses = [
      aws_apigateway.IntegrationResponse(status_code="400", selection_pattern=r"4\d{2}"),
      aws_apigateway.IntegrationResponse(status_code="500", selection_pattern=r"5\d{2}")
    ]
    apigw_ok_responses = [
      aws_apigateway.IntegrationResponse(
        status_code="200"
      )
    ]

    # POST /comments -> DynamoDB PutItem; request id doubles as commentId.
    ddb_put_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[*apigw_ok_responses, *apigw_error_responses],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "Item": {
            "commentId": {
              "S": "$context.requestId"
            },
            "pageId": {
              "S": "$input.path('$.pageId')"
            },
            "userName": {
              "S": "$input.path('$.userName')"
            },
            "message": {
              "S": "$input.path('$.message')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )
    create_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='PutItem',
      integration_http_method='POST',
      options=ddb_put_item_options
    )
    method_responses = [
      aws_apigateway.MethodResponse(status_code='200'),
      aws_apigateway.MethodResponse(status_code='400'),
      aws_apigateway.MethodResponse(status_code='500')
    ]
    all_resources.add_method('POST', create_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    # GET /comments/{pageId} -> DynamoDB Query on the GSI; the VTL below
    # reshapes Query output into {"comments": [...]}.
    get_response_templates = '''
#set($inputRoot = $input.path('$'))
{
  "comments": [
    #foreach($elem in $inputRoot.Items) {
       "commentId": "$elem.commentId.S",
       "userName": "******",
       "message": "$elem.message.S"
     }#if($foreach.hasNext),#end
    #end
  ]
}'''
    ddb_query_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[
        aws_apigateway.IntegrationResponse(
          status_code="200",
          response_templates={
            'application/json': get_response_templates
          }
        ),
        *apigw_error_responses
      ],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "IndexName": "pageId-index",
          "KeyConditionExpression": "pageId = :v1",
          "ExpressionAttributeValues": {
            ":v1": {
              "S": "$input.params('pageId')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )
    get_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='Query',
      integration_http_method='POST',
      options=ddb_query_item_options
    )
    one_resource.add_method('GET', get_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    cdk.CfnOutput(self, 'DynamoDBTableName', value=ddb_table.table_name)
    cdk.CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
    cdk.CfnOutput(self, 'UserPoolClientId', value=user_pool_client.user_pool_client_id)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Provision directory-service demo resources.

    Creates a Simple AD (only when deploying to us-east-1, where it is
    available), a Microsoft AD, each in its own two-AZ VPC, and a Cognito
    user pool. Exposes the last-created VPC id as ``self.vpc_id``.
    """
    super().__init__(scope, construct_id, **kwargs)
    # env is expected to be a dict-like {'region': ..., 'account': ...}.
    # Use .get so a missing 'env' kwarg (or one without 'region') skips the
    # Simple AD branch instead of raising KeyError.
    env = kwargs.get('env', {})

    if env.get('region') == 'us-east-1':  # Simple AD is not offered in us-east-2
        # The VPC for Simple AD
        simple_vpc = ec2.Vpc(self, 'devassoc-auth-simple',
                             max_azs=2,
                             cidr='10.40.0.0/16',
                             subnet_configuration=[
                                 ec2.SubnetConfiguration(
                                     name='simple-ad-demo-',
                                     subnet_type=ec2.SubnetType.PUBLIC,
                                     cidr_mask=24)
                             ])
        core.Tags.of(simple_vpc).add('Name', 'devassoc-simple-ad-demo')
        self.vpc_id = simple_vpc.vpc_id
        core.CfnOutput(self, 'simple-vpc-id', value=simple_vpc.vpc_id)
        core.CfnOutput(self, 'simple-public-subnet-id-1',
                       value=simple_vpc.public_subnets[0].subnet_id)
        core.CfnOutput(self, 'simple-public-subnet-az-1',
                       value=simple_vpc.public_subnets[0].availability_zone)
        core.CfnOutput(self, 'simple-public-subnet-id-2',
                       value=simple_vpc.public_subnets[1].subnet_id)
        core.CfnOutput(self, 'simple-public-subnet-az-2',
                       value=simple_vpc.public_subnets[1].availability_zone)
        ad.CfnSimpleAD(self, 'simple-ad',
                       name='simple-ad-demo',
                       password='******',
                       size='Small',
                       vpc_settings={
                           "vpcId": simple_vpc.vpc_id,
                           "subnetIds": [
                               simple_vpc.public_subnets[0].subnet_id,
                               simple_vpc.public_subnets[1].subnet_id
                           ]
                       })

    # NOTE(review): original indentation was lost; Microsoft AD is available
    # in all demo regions, so this section is placed outside the region
    # guard — confirm against the upstream sample.
    # The VPC for Microsoft AD
    microsoft_vpc = ec2.Vpc(self, 'devassoc-auth-microsoft',
                            max_azs=2,
                            cidr='10.30.0.0/16',
                            subnet_configuration=[
                                ec2.SubnetConfiguration(
                                    name='microsoft-ad-demo-',
                                    subnet_type=ec2.SubnetType.PUBLIC,
                                    cidr_mask=24)
                            ])
    core.Tags.of(microsoft_vpc).add('Name', 'devassoc-microsoft-ad-demo')
    self.vpc_id = microsoft_vpc.vpc_id
    core.CfnOutput(self, 'microsoft-vpc-id', value=microsoft_vpc.vpc_id)
    core.CfnOutput(self, 'microsoft-public-subnet-id-1',
                   value=microsoft_vpc.public_subnets[0].subnet_id)
    core.CfnOutput(self, 'microsoft-public-subnet-az-1',
                   value=microsoft_vpc.public_subnets[0].availability_zone)
    core.CfnOutput(self, 'microsoft-public-subnet-id-2',
                   value=microsoft_vpc.public_subnets[1].subnet_id)
    core.CfnOutput(self, 'microsoft-public-subnet-az-2',
                   value=microsoft_vpc.public_subnets[1].availability_zone)
    ad.CfnMicrosoftAD(
        self, 'microsoft-ad',
        name='corp.example.com',  # must be valid as a DNS name
        short_name='corp',  # console calls this "Directory NetBIOS name"
        password='******',
        edition='Standard',
        vpc_settings={
            "vpcId": microsoft_vpc.vpc_id,
            "subnetIds": [
                microsoft_vpc.public_subnets[0].subnet_id,
                microsoft_vpc.public_subnets[1].subnet_id
            ]
        })

    # There should be a Cloud Directory example here, but I couldn't find a CDK API
    cognito_user_pool = cognito.UserPool(
        self, 'cognito-user-pool',
        user_pool_name='admin-group',
        sign_in_aliases={'username': True})
    core.Tags.of(cognito_user_pool).add('user', 'admin-user')