def __init__(self, scope: core.Construct, id: str, secret_param, db_param, vpc, **kwargs) -> None:
    """Provision an Aurora Serverless (MySQL-compatible) cluster with a generated
    master-user secret, and publish the secret ARN and cluster ARN to SSM.

    :param scope: Parent construct.
    :param id: Construct id.
    :param secret_param: SSM parameter name under which the secret ARN is stored.
    :param db_param: SSM parameter name under which the cluster ARN is stored.
    :param vpc: Wrapper exposing ``subnet_list`` and ``security_group``
        (project type — assumed from usage below, confirm against caller).
    """
    super().__init__(scope, id, **kwargs)

    # Secrets Manager generates the password for this secret.
    # NOTE(review): username is literally '******' — looks like a redacted
    # placeholder; confirm the intended master user name.
    secret = rds.DatabaseSecret(self, id="MasterUserSecret", username='******')

    # Publish the secret ARN so other stacks/applications can look it up.
    # (Construct id "Secrete_Parameter" is a typo but renaming it would
    # replace the deployed resource — left as-is.)
    ssm.StringParameter(self, "Secrete_Parameter",
                        parameter_name=secret_param,
                        string_value=secret.secret_arn)

    dbSubnetGroup = rds.CfnDBSubnetGroup(
        self, 'AuroraSubnetGroup',
        db_subnet_group_description='Subnet group to access aurora',
        subnet_ids=vpc.subnet_list,
        db_subnet_group_name='aurora-subnet-group')

    self.aurora_serverless = rds.CfnDBCluster(
        self, 'Serverless DB',
        # Credentials are resolved from the generated secret at deploy time.
        master_username=secret.secret_value_from_json("username").to_string(),
        master_user_password=secret.secret_value_from_json("password").to_string(),
        engine='aurora',
        engine_mode='serverless',
        enable_http_endpoint=True,  # enables the RDS Data API
        db_subnet_group_name=dbSubnetGroup.db_subnet_group_name,
        port=3306,
        vpc_security_group_ids=[vpc.security_group.security_group_id],
        scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            min_capacity=1,
            max_capacity=2,
            seconds_until_auto_pause=300))

    # L1 resources don't infer these orderings automatically; declare them.
    self.aurora_serverless.node.add_dependency(dbSubnetGroup)
    self.aurora_serverless.node.add_dependency(vpc.security_group)

    # Associate the secret with the cluster via a target attachment.
    secret_attached = sm.CfnSecretTargetAttachment(
        self,
        id="secret_attachment",
        secret_id=secret.secret_arn,
        target_id=self.aurora_serverless.ref,
        target_type="AWS::RDS::DBCluster",
    )
    secret_attached.node.add_dependency(self.aurora_serverless)

    # CfnDBCluster exposes no ARN attribute; build it by ARN convention
    # (the cluster's Ref is its identifier).
    cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(self.region, self.account, self.aurora_serverless.ref)

    # Publish the cluster ARN for consumers (e.g. Data API clients).
    ssm.StringParameter(self, "Database_Parameter",
                        parameter_name=db_param,
                        string_value=cluster_arn)
def __init__(
        self,
        stack: core.Stack,
        prefix: str,
        vpc_parameters: VPCParameters,
        database: Union[aws_rds.CfnDBInstance, aws_rds.CfnDBCluster],
        kms_key: Optional[aws_kms.Key] = None
) -> None:
    """
    Constructor. Creates a Secrets Manager secret for the given database,
    wires up a rotation lambda with its invoke permission and a 30-day
    rotation schedule, and attaches the secret to the database.

    :param stack: A stack in which resources should be created.
    :param prefix: A prefix to give for every resource.
    :param vpc_parameters: VPC parameters for resource (e.g. lambda rotation function) configuration.
    :param database: A database instance for which this secret should be applied.
    :param kms_key: Custom or managed KMS key for secret encryption.

    :raises TypeError: If the database is neither a CfnDBInstance nor a CfnDBCluster.
    """
    super().__init__()

    # This template is sent to a lambda function that executes secret rotation.
    # If you choose to change this template, make sure you change lambda
    # function source code too.
    template = {
        'engine': 'mysql',
        'host': database.attr_endpoint_address,
        'username': database.master_username,
        'password': database.master_user_password,
        'dbname': None,
        'port': 3306
    }

    # Instances and clusters have different attributes.
    if isinstance(database, aws_rds.CfnDBInstance):
        template['dbname'] = database.db_name
    elif isinstance(database, aws_rds.CfnDBCluster):
        template['dbname'] = database.database_name

    # Create a secret instance. Secrets Manager generates the 'password'
    # key; the rest of the template is carried through verbatim.
    self.secret = aws_secretsmanager.Secret(
        scope=stack,
        id=prefix + 'RdsSecret',
        description=f'A secret for {prefix}.',
        encryption_key=kms_key,
        generate_secret_string=SecretStringGenerator(
            generate_string_key='password',
            secret_string_template=json.dumps(template)),
        secret_name=prefix + 'RdsSecret')

    # Make sure database is fully deployed and configured before creating a secret for it.
    self.secret.node.add_dependency(database)

    # Create a lambda function for secret rotation.
    self.secret_rotation = SecretRotation(
        stack=stack,
        prefix=prefix,
        secret=self.secret,
        kms_key=kms_key,
        vpc_parameters=vpc_parameters,
        database=database)

    # Make sure secrets manager can invoke this lambda function.
    self.sm_invoke_permission = aws_lambda.CfnPermission(
        scope=stack,
        id=prefix + 'SecretsManagerInvokePermission',
        action='lambda:InvokeFunction',
        function_name=self.secret_rotation.rotation_lambda_function.function_name,
        principal="secretsmanager.amazonaws.com",
    )

    # Make sure lambda function is created before making its permissions.
    self.sm_invoke_permission.node.add_dependency(
        self.secret_rotation.rotation_lambda_function)

    # Apply rotation for the secret instance.
    self.rotation_schedule = aws_secretsmanager.RotationSchedule(
        scope=stack,
        id=prefix + 'RotationSchedule',
        secret=self.secret,
        rotation_lambda=self.secret_rotation.rotation_lambda_function,
        automatically_after=core.Duration.days(30))

    # Make sure invoke permission for secrets manager is created before creating a schedule.
    self.rotation_schedule.node.add_dependency(self.sm_invoke_permission)

    # Instances and clusters have different ARN formats and attachment types.
    # BUGFIX: the region was hard-coded to 'eu-west-1'; derive it from the
    # stack so this construct deploys correctly in any region. The two
    # previously-duplicated isinstance chains are merged into one.
    if isinstance(database, aws_rds.CfnDBInstance):
        assert database.db_instance_identifier, 'Instance identifier must be specified.'
        target_arn = f'arn:aws:rds:{stack.region}:{stack.account}:db:{database.db_instance_identifier}'
        target_type = 'AWS::RDS::DBInstance'
    elif isinstance(database, aws_rds.CfnDBCluster):
        assert database.db_cluster_identifier, 'Cluster identifier must be specified.'
        target_arn = f'arn:aws:rds:{stack.region}:{stack.account}:cluster:{database.db_cluster_identifier}'
        target_type = 'AWS::RDS::DBCluster'
    else:
        raise TypeError('Unsupported DB type.')

    # Attach the secret instance to the desired database.
    self.target_db_attachment = aws_secretsmanager.CfnSecretTargetAttachment(
        scope=stack,
        id=prefix + 'TargetRdsAttachment',
        secret_id=self.secret.secret_arn,
        target_id=target_arn,
        target_type=target_type)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Demo web-shop stack: a VPC hosting an internet-facing ALB (optionally
    with TLS and Route53 alias), a Fargate service running the catalog
    container behind the ALB, and an Aurora Serverless MySQL cluster whose
    credentials live in Secrets Manager.

    Configuration comes from the module-level ``vars`` object (cidr, sslcert,
    sslcert_arn, customdomain, hosted_zone_id, zone_name, region).
    """
    super().__init__(scope, id, **kwargs)
    # The code that defines your stack goes here

    # Create a VPC
    myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

    # SG for ELB creation — world-open on 80/443 (public web frontend).
    websitefrontendSG = ec2.SecurityGroup(
        self,
        'websitefrontendSG',
        vpc=myvpc,
        security_group_name='websitefrontendSG')
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(80))
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(443))

    # Create ALB in VPC
    alb = elb.ApplicationLoadBalancer(
        self,
        'websitefrontend-public',
        vpc=myvpc,
        load_balancer_name='websitefrontend-public',
        security_group=websitefrontendSG,
        internet_facing=True)

    # Add target group to ALB (IP targets, as required for Fargate tasks).
    catalogtargetgroup = elb.ApplicationTargetGroup(
        self,
        'CatalogTargetGroup',
        port=80,
        vpc=myvpc,
        target_type=elb.TargetType.IP)

    if not vars.sslcert:
        # Add http listener to ALB — plain HTTP serving when no cert is configured.
        alblistenerhttp = elb.ApplicationListener(
            self,
            'alblistenerhttp',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=80)

    if vars.sslcert:
        # Add http listener to ALB (no default target group: HTTP only redirects).
        alblistenerhttp = elb.ApplicationListener(self,
                                                  'alblistenerhttp',
                                                  load_balancer=alb,
                                                  port=80)
        elb.ApplicationListenerRule(self,
                                    'httpredirectionrule',
                                    listener=alblistenerhttp,
                                    redirect_response=elb.RedirectResponse(
                                        status_code='HTTP_301',
                                        port='443',
                                        protocol='HTTPS'))
        # OPTIONAL - Add https listener to ALB & attach certificate
        alblistenerhttps = elb.ApplicationListener(
            self,
            'alblistenerhttps',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=443,
            certificate_arns=[vars.sslcert_arn])
        # OPTIONAL - Redirect HTTP to HTTPS
        # NOTE(review): this duplicates the 'httpredirectionrule' redirect
        # declared just above — confirm both are intended.
        alblistenerhttp.add_redirect_response(id='redirectionrule',
                                              port='443',
                                              status_code='HTTP_301',
                                              protocol='HTTPS')

    if vars.customdomain:
        # OPTIONAL - Update DNS with ALB: alias the zone apex to the ALB.
        webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
            self,
            id='customdomain',
            hosted_zone_id=vars.hosted_zone_id,
            zone_name=vars.zone_name)
        webshop_root_record = r53.ARecord(
            self,
            'ALBAliasRecord',
            zone=webshopxyz_zone,
            target=r53.RecordTarget.from_alias(alias.LoadBalancerTarget(alb)))

    # SG for ECS creation — only the ALB frontend SG may reach the tasks.
    ECSSG = ec2.SecurityGroup(self,
                              'ECSSecurityGroup',
                              vpc=myvpc,
                              security_group_name='ECS')
    ECSSG.add_ingress_rule(peer=websitefrontendSG,
                           connection=ec2.Port.tcp(80))

    # SG for MySQL creation — only ECS tasks may reach the database.
    MySQLSG = ec2.SecurityGroup(self,
                                'DBSecurityGroup',
                                vpc=myvpc,
                                security_group_name='DB')
    MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

    # Create DB subnet group from the VPC's private subnets.
    subnetlist = []
    for subnet in myvpc.private_subnets:
        subnetlist.append(subnet.subnet_id)
    subnetgr = rds.CfnDBSubnetGroup(
        self,
        'democlustersubnetgroup',
        db_subnet_group_name='democlustersubnetgroup',
        db_subnet_group_description='DemoCluster',
        subnet_ids=subnetlist)

    # Create secret db passwd — Secrets Manager generates the password;
    # NOTE(review): the template username '******' looks like a redacted
    # placeholder; confirm the intended value.
    secret = sm.SecretStringGenerator(
        exclude_characters="\"'@/\\",
        secret_string_template='{"username": "******"}',
        generate_string_key='password',
        password_length=40)
    dbpass = sm.Secret(self,
                       'democlusterpass',
                       secret_name='democlusterpass',
                       generate_secret_string=secret)

    # Create Aurora serverless MySQL instance
    dbcluster = rds.CfnDBCluster(
        self,
        'DemoCluster',
        engine='aurora',
        engine_mode='serverless',
        engine_version='5.6',
        db_cluster_identifier='DemoCluster',
        master_username=dbpass.secret_value_from_json(
            'username').to_string(),
        master_user_password=dbpass.secret_value_from_json(
            'password').to_string(),
        storage_encrypted=True,
        port=3306,
        vpc_security_group_ids=[MySQLSG.security_group_id],
        scaling_configuration=rds.CfnDBCluster.
        ScalingConfigurationProperty(auto_pause=True,
                                     max_capacity=4,
                                     min_capacity=1,
                                     seconds_until_auto_pause=300),
        db_subnet_group_name=subnetgr.db_subnet_group_name)
    # Force CloudFormation ordering onto the L1 subnet group.
    dbcluster.add_override('DependsOn', 'democlustersubnetgroup')

    # Attach database to secret
    attach = sm.CfnSecretTargetAttachment(
        self,
        'RDSAttachment',
        secret_id=dbpass.secret_arn,
        target_id=dbcluster.ref,
        target_type='AWS::RDS::DBCluster')

    # Upload image into ECR repo (built from the repository root).
    ecrdemoimage = ecra.DockerImageAsset(self,
                                         'ecrdemoimage',
                                         directory='../',
                                         repository_name='demorepo',
                                         exclude=['cdk.out'])

    # Create ECS fargate cluster
    ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

    # Create task role for productsCatalogTask — read access to the DB secret only.
    getsecretpolicystatement = iam.PolicyStatement(actions=[
        "secretsmanager:GetResourcePolicy", "secretsmanager:GetSecretValue",
        "secretsmanager:DescribeSecret",
        "secretsmanager:ListSecretVersionIds"
    ],
                                                   resources=[
                                                       dbpass.secret_arn
                                                   ],
                                                   effect=iam.Effect.ALLOW)
    getsecretpolicydocument = iam.PolicyDocument(
        statements=[getsecretpolicystatement])
    # NOTE(review): inline_policies is documented as a name->PolicyDocument
    # mapping in aws-cdk; a bare list may be rejected at synth — verify
    # against the pinned CDK version.
    taskrole = iam.Role(
        self,
        'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name='TaskRoleforproductsCatalogTask',
        inline_policies=[getsecretpolicydocument])

    # Create task definition
    taskdefinition = ecs.FargateTaskDefinition(self,
                                               'productsCatalogTask',
                                               cpu=1024,
                                               memory_limit_mib=2048,
                                               task_role=taskrole)

    # Add container to task definition; the app reads the secret itself
    # using the region and secret name passed via environment.
    productscatalogcontainer = taskdefinition.add_container(
        'productscatalogcontainer',
        image=ecs.ContainerImage.from_docker_image_asset(
            asset=ecrdemoimage),
        environment={
            "region": vars.region,
            "secretname": "democlusterpass"
        })
    productscatalogcontainer.add_port_mappings(
        ecs.PortMapping(container_port=80, host_port=80))

    # Create service and associate it with the cluster (private subnets only).
    catalogservice = ecs.FargateService(
        self,
        'catalogservice',
        task_definition=taskdefinition,
        assign_public_ip=False,
        security_group=ECSSG,
        vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnets),
        cluster=ecscluster,
        desired_count=2)

    # Add autoscaling to the service on CPU utilization.
    # NOTE(review): 1s/0s cooldowns are aggressive — presumably demo values.
    scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                   min_capacity=1)
    scaling.scale_on_cpu_utilization(
        'ScaleOnCPU',
        target_utilization_percent=70,
        scale_in_cooldown=core.Duration.seconds(amount=1),
        scale_out_cooldown=core.Duration.seconds(amount=0))

    # Associate the fargate service with load balancer targetgroup
    catalogservice.attach_to_application_target_group(catalogtargetgroup)
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
    """Aurora Serverless PostgreSQL cluster in the given VPC's isolated
    subnets, with a generated master-user secret, the Data API enabled, and
    CfnOutputs exporting the cluster's identifying attributes.

    :param vpc: VPC providing ``isolated_subnets`` and ``availability_zones``
        (project type — assumed from usage, confirm against caller).
    """
    super().__init__(scope, id, **kwargs)

    db_master_user_name = "admin_user"

    # Secrets Manager generates the password for this master user.
    secret = rds.DatabaseSecret(self,
                                id="MasterUserSecret",
                                username=db_master_user_name)

    # Collect isolated-subnet ids for the DB subnet group.
    subnet_ids = []
    for subnet in vpc.isolated_subnets:
        subnet_ids.append(subnet.subnet_id)

    subnet_group = rds.CfnDBSubnetGroup(
        self,
        id="AuroraServerlessSubnetGroup",
        db_subnet_group_description='Aurora Postgres Serverless Subnet Group',
        subnet_ids=subnet_ids,
        db_subnet_group_name='auroraserverlesssubnetgroup'  # needs to be all lowercase
    )

    db_cluster_name = "aurora-serverless-postgres-db"

    # NOTE(review): description says "ssh access" but the rule below opens
    # PostgreSQL (5432) — description appears stale.
    security_group = ec2.SecurityGroup(
        self,
        id="SecurityGroup",
        vpc=vpc,
        description="Allow ssh access to ec2 instances",
        allow_all_outbound=True)

    security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                    ec2.Port.tcp(5432),
                                    "allow psql through")

    self.db = rds.CfnDBCluster(
        self,
        id="AuroraServerlessDB",
        # NOTE(review): .name on DatabaseClusterEngine may not yield the
        # CloudFormation engine string 'aurora-postgresql' on all CDK
        # versions — verify the synthesized template.
        engine=rds.DatabaseClusterEngine.AURORA_POSTGRESQL.name,
        engine_mode="serverless",
        db_subnet_group_name=subnet_group.db_subnet_group_name,
        vpc_security_group_ids=[security_group.security_group_id],
        availability_zones=vpc.availability_zones,
        db_cluster_identifier=db_cluster_name,
        #db_cluster_parameter_group_name=
        database_name="slsdb",
        # Credentials are resolved from the generated secret at deploy time.
        master_username=secret.secret_value_from_json(
            "username").to_string(),
        master_user_password=secret.secret_value_from_json(
            "password").to_string(),
        port=5432,
        deletion_protection=False,
        scaling_configuration=rds.CfnDBCluster.
        ScalingConfigurationProperty(auto_pause=True,
                                     min_capacity=2,
                                     max_capacity=16,
                                     seconds_until_auto_pause=300),
        # NOTE(review): these are MySQL log types; for aurora-postgresql the
        # exportable type is 'postgresql' (and serverless v1 may not support
        # log exports at all) — confirm before deploying.
        enable_cloudwatch_logs_exports=[
            "error", "general", "slowquery", "audit"
        ],
        enable_http_endpoint=True  # enables the RDS Data API
        #kms_key_id=
        #tags=
    )

    # L1 resources don't infer these orderings automatically; declare them.
    self.db.node.add_dependency(subnet_group)
    self.db.node.add_dependency(security_group)

    #secret_attached = secret.attach(target=self)
    #secret.add_target_attachment(id="secret_attachment", target=self.db)
    # Associate the secret with the cluster via a target attachment.
    secret_attached = sm.CfnSecretTargetAttachment(
        self,
        id="secret_attachment",
        secret_id=secret.secret_arn,
        target_id=self.db.ref,
        target_type="AWS::RDS::DBCluster",
    )
    secret_attached.node.add_dependency(self.db)

    # Exports for cross-stack consumption (names namespaced by region/account/stack).
    core.CfnOutput(
        self,
        id="StackName",
        value=self.stack_name,
        description="Stack Name",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:stack-name")

    core.CfnOutput(
        self,
        id="DatabaseName",
        value=self.db.database_name,
        description="Database Name",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:database-name")

    # CfnDBCluster exposes no ARN attribute; build it by ARN convention.
    core.CfnOutput(
        self,
        id="DatabaseClusterArn",
        value=
        f"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.ref}",
        description="Database Cluster Arn",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:database-cluster-arn"
    )

    core.CfnOutput(
        self,
        id="DatabaseSecretArn",
        value=secret.secret_arn,
        description="Database Secret Arn",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:database-secret-arn"
    )

    core.CfnOutput(
        self,
        id="DatabaseClusterID",
        value=self.db.db_cluster_identifier,
        description="Database Cluster Id",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:database-cluster-id"
    )

    core.CfnOutput(
        self,
        id="AuroraEndpointAddress",
        value=self.db.attr_endpoint_address,
        description="Aurora Endpoint Address",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:aurora-endpoint-address"
    )

    core.CfnOutput(
        self,
        id="DatabaseMasterUserName",
        value=db_master_user_name,
        description="Database Master User Name",
        export_name=
        f"{self.region}:{self.account}:{self.stack_name}:database-master-username"
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Image-content-search stack: S3 upload bucket + SQS pipeline, Cognito
    auth fronted by API Gateway, lambdas for signed-url issuing / image
    massaging / Rekognition analysis / data storage, an Aurora Serverless
    MySQL database with Data API, and an EventBridge bus between the
    analyzer and the data lambda. Settings come from ``stack/config.yml``.
    """
    super().__init__(scope, id, **kwargs)

    with open("stack/config.yml", 'r') as stream:
        configs = yaml.safe_load(stream)

    ### S3 core
    images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

    images_S3_bucket.add_cors_rule(
        allowed_methods=[_s3.HttpMethods.POST],
        allowed_origins=["*"] # add API gateway web resource URL
    )

    ### SQS core
    image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
    image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE", dead_letter_queue={
        "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
        "queue": image_deadletter_queue
    })

    ### api gateway core
    api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
    api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
    api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
    api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
    api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

    ### landing page function
    get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
        function_name="ICS_GET_LANDING_PAGE",
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/landingPage"))

    get_landing_page_integration = LambdaIntegration(
        get_landing_page_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }])

    ### cognito
    required_attribute = _cognito.StandardAttribute(required=True)

    users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
        auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
        standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
        self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

    user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT",
        supported_identity_providers=["COGNITO"],
        allowed_o_auth_flows=["implicit"],
        allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
        user_pool_id=users_pool.user_pool_id,
        callback_ur_ls=[api_gateway_landing_page_resource.url],
        allowed_o_auth_flows_user_pool_client=True,
        explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

    user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN",
        user_pool=users_pool,
        cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

    ### get signed URL function
    get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
        function_name="ICS_GET_SIGNED_URL",
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
        },
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/getSignedUrl"))

    get_signedurl_integration = LambdaIntegration(
        get_signedurl_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])

    # The L2 Method has no direct authorizer-id prop here, so override the
    # underlying CfnMethod resource.
    api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

    images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

    ### image massage function — moves uploads from new/ to processed/ and
    ### enqueues them for analysis.
    image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
        function_name="ICS_IMAGE_MASSAGE",
        timeout=core.Duration.seconds(6),
        runtime=Runtime.PYTHON_3_7,
        environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
        handler="main.handler",
        code=Code.asset("./src/imageMassage"))

    images_S3_bucket.grant_write(image_massage_function, "processed/*")
    images_S3_bucket.grant_delete(image_massage_function, "new/*")
    images_S3_bucket.grant_read(image_massage_function, "new/*")

    new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

    images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
        new_image_added_notification,
        _s3.NotificationKeyFilter(prefix="new/")
        )

    image_queue.grant_send_messages(image_massage_function)

    ### image analyzer function — consumes the queue and calls Rekognition.
    image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
        function_name="ICS_IMAGE_ANALYSIS",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(10),
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "REGION": core.Aws.REGION,
        },
        handler="main.handler",
        code=Code.asset("./src/imageAnalysis"))

    image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
    # BUGFIX: consume permission belongs to the analyzer (the queue's
    # consumer via the SqsEventSource above); it was previously granted to
    # the massage function, which only sends to the queue.
    image_queue.grant_consume_messages(image_analyzer_function)

    lambda_rekognition_access = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
        resources=["*"]
    )

    image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
    images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

    ### API gateway finalizing
    self.add_cors_options(api_gateway_get_signedurl_resource)
    self.add_cors_options(api_gateway_landing_page_resource)
    self.add_cors_options(api_gateway_image_search_resource)

    ### database
    database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
        secret_name="rds-db-credentials/image-content-search-rds-secret",
        generate_secret_string=_secrets_manager.SecretStringGenerator(
            generate_string_key='password',
            secret_string_template='{"username": "******"}',
            exclude_punctuation=True,
            exclude_characters='/@\" \\\'',
            require_each_included_type=True
        )
    )

    database = _rds.CfnDBCluster(self, "ICS_DATABASE",
        engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
        engine_mode="serverless",
        database_name=configs["Database"]["Name"],
        enable_http_endpoint=True,  # Data API, used by the data lambda below
        deletion_protection=configs["Database"]["DeletionProtection"],
        master_username=database_secret.secret_value_from_json("username").to_string(),
        master_user_password=database_secret.secret_value_from_json("password").to_string(),
        scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=configs["Database"]["Scaling"]["AutoPause"],
            min_capacity=configs["Database"]["Scaling"]["Min"],
            max_capacity=configs["Database"]["Scaling"]["Max"],
            seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
        ),
    )

    # CfnDBCluster exposes no ARN attribute; build it by ARN convention.
    database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)

    secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
        target_type="AWS::RDS::DBCluster",
        target_id=database.ref,
        secret_id=database_secret.secret_arn
    )

    secret_target.node.add_dependency(database)

    ### database function
    image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
        role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
        assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
        ]
    )

    image_data_function = Function(self, "ICS_IMAGE_DATA",
        function_name="ICS_IMAGE_DATA",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(5),
        role=image_data_function_role,
        environment={
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "CLUSTER_ARN": database_cluster_arn,
            "CREDENTIALS_ARN": database_secret.secret_arn,
            "DB_NAME": database.database_name,
            "REGION": core.Aws.REGION
        },
        handler="main.handler",
        code=Code.asset("./src/imageData")
    )

    image_search_integration = LambdaIntegration(
        image_data_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])

    api_gateway_image_search_resource.add_method('POST', image_search_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)

    lambda_access_search = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["translate:TranslateText"],
        resources=["*"]
    )

    image_data_function.add_to_role_policy(lambda_access_search)

    ### custom resource — runs the data lambda once at deploy time to create
    ### the database schema.
    lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER',
        on_event_handler=image_data_function
    )

    core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE',
        service_token=lambda_provider.service_token,
        pascal_case_properties=False,
        resource_type="Custom::SchemaCreation",
        properties={
            "source": "Cloudformation"
        }
    )

    ### event bridge — analyzer publishes results; data lambda stores them.
    event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

    event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
        rule_name="ICS_IMAGE_CONTENT_RULE",
        description="The event from image analyzer to store the data",
        event_bus=event_bus,
        event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
    )

    event_rule.add_target(_event_targets.LambdaFunction(image_data_function))
    event_bus.grant_put_events(image_analyzer_function)
    image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

    ### outputs
    core.CfnOutput(self, 'CognitoHostedUILogin',
        value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
        description='The Cognito Hosted UI Login Page'
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Aurora Serverless (MySQL 5.7) cluster in a new VPC, with a generated
    master-user secret, the Data API enabled, a schema-migration custom
    resource, and CfnOutputs exporting the cluster's identifying attributes.

    DB_USERNAME and DB_NAME may be overridden via environment variables.
    """
    super().__init__(scope, id, **kwargs)

    vpc = ec2.Vpc(self, "VPC")

    db_master_user_name = os.getenv("DB_USERNAME", "admin_user")

    # Secrets Manager generates the password for this master user.
    self.secret = rds.DatabaseSecret(
        self, id="MasterUserSecret", username=db_master_user_name
    )

    # BUGFIX: capture the subnet group and reference it directly below.
    # The original passed db_subnet_group_name=core.Fn.ref("rdsSubnetGroup"),
    # which only works if the CloudFormation logical ID happens to equal the
    # construct id — fragile if the construct is ever nested or renamed.
    subnet_group = rds.CfnDBSubnetGroup(
        self,
        "rdsSubnetGroup",
        db_subnet_group_description="private subnets for rds",
        subnet_ids=vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE
        ).subnet_ids,
    )

    db_name = os.getenv("DB_NAME", "anonfed")
    self.db = rds.CfnDBCluster(
        self,
        "auroraCluster",
        engine="aurora-mysql",
        engine_version="5.7.mysql_aurora.2.08.1",
        db_cluster_parameter_group_name="default.aurora-mysql5.7",
        # snapshot_identifier="<snapshot_arn>",  # your snapshot
        engine_mode="serverless",
        scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            min_capacity=1,
            max_capacity=4,
            seconds_until_auto_pause=300,
        ),
        # Ref of a DBSubnetGroup resolves to the subnet group name; the
        # direct reference also gives CloudFormation the ordering dependency.
        db_subnet_group_name=subnet_group.ref,
        database_name=db_name,
        # Credentials are resolved from the generated secret at deploy time.
        master_username=self.secret.secret_value_from_json("username").to_string(),
        master_user_password=self.secret.secret_value_from_json(
            "password"
        ).to_string(),
        enable_http_endpoint=True,  # Data API, used by the migration below
    )

    # Associate the secret with the cluster via a target attachment.
    secret_attached = sm.CfnSecretTargetAttachment(
        self,
        id="secret_attachment",
        secret_id=self.secret.secret_arn,
        target_id=self.db.ref,
        target_type="AWS::RDS::DBCluster",
    )
    secret_attached.node.add_dependency(self.db)

    # CfnDBCluster exposes no ARN attribute; build it by ARN convention.
    db_ref = f"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.ref}"

    # Custom resource that applies the database schema via the Data API.
    migration = SchemaMigrationResource(
        self, "schemamigration", self.secret.secret_arn, db_name, db_ref
    )

    # Publish the custom resource output
    core.CfnOutput(
        self,
        "ResponseMessage",
        description="Database Migration",
        value=migration.response,
    )

    # Exports for cross-stack consumption (names namespaced by region/account/stack).
    core.CfnOutput(
        self,
        id="DatabaseName",
        value=self.db.database_name,
        description="Database Name",
        export_name=f"{self.region}:{self.account}:{self.stack_name}:database-name",
    )
    core.CfnOutput(
        self,
        id="DatabaseClusterArn",
        value=db_ref,
        description="Database Cluster Arn",
        export_name=f"{self.region}:{self.account}:{self.stack_name}:database-cluster-arn",
    )
    core.CfnOutput(
        self,
        id="DatabaseSecretArn",
        value=self.secret.secret_arn,
        description="Database Secret Arn",
        export_name=f"{self.region}:{self.account}:{self.stack_name}:database-secret-arn",
    )
    core.CfnOutput(
        self,
        id="AuroraEndpointAddress",
        value=self.db.attr_endpoint_address,
        description="Aurora Endpoint Address",
        export_name=f"{self.region}:{self.account}:{self.stack_name}:aurora-endpoint-address",
    )
    core.CfnOutput(
        self,
        id="DatabaseMasterUserName",
        value=db_master_user_name,
        description="Database Master User Name",
        export_name=f"{self.region}:{self.account}:{self.stack_name}:database-master-username",
    )