Example #1
def create_rds_cluster(scope: core.Construct, stack_name: str, vpc: IVpc,
                       database_name: str, database_username: str,
                       database_password: str, database_encrypted: bool):
    subnet_ids = []
    for subnet in vpc.private_subnets:
        subnet_ids.append(subnet.subnet_id)

    db_subnet_group = rds.CfnDBSubnetGroup(
        scope=scope,
        id='dbSubnetGroup',
        db_subnet_group_description='Subnet group to access RDS',
        db_subnet_group_name=stack_name,
        subnet_ids=subnet_ids,
    )

    db_security_group = ec2.SecurityGroup(
        scope=scope,
        id='dbSecurityGroup',
        vpc=vpc,
        allow_all_outbound=True,
        description=stack_name,
    )
    db_security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432))

    database = rds.CfnDBCluster(
        scope=scope,
        id='database',
        database_name=database_name,
        db_cluster_identifier=stack_name,
        engine='aurora-postgresql',  # serverless engine_mode requires an Aurora engine string
        engine_mode='serverless',
        master_username=database_username,
        master_user_password=core.SecretValue.plain_text(
            database_password).to_string(),
        port=5432,
        db_subnet_group_name=db_subnet_group.db_subnet_group_name,
        vpc_security_group_ids=[db_security_group.security_group_id],
        storage_encrypted=database_encrypted,
        scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            max_capacity=2,
            min_capacity=1,
            seconds_until_auto_pause=3600,
        ))
    database.add_depends_on(db_subnet_group)

    return database
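
A minimal sketch of how this helper might be wired into a CDK v1 stack. The import aliases, the stack class, and the plaintext password argument are assumptions for illustration only; later examples source the password from Secrets Manager, which is generally preferable to passing it in as a string.

    from aws_cdk import core, aws_ec2 as ec2

    class ServerlessDbStack(core.Stack):
        def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
            super().__init__(scope, id, **kwargs)
            vpc = ec2.Vpc(self, "Vpc")  # any IVpc with private subnets works
            create_rds_cluster(
                scope=self,
                stack_name="my-serverless-db",
                vpc=vpc,
                database_name="appdb",
                database_username="app_user",
                database_password="change-me",  # prefer a Secrets Manager secret
                database_encrypted=True,
            )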
Example #2
    def __init__(self, scope: core.Construct, id: str, secret_param, db_param, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        secret = rds.DatabaseSecret(self, id="MasterUserSecret", username='******')
        ssm.StringParameter(self, "Secret_Parameter", parameter_name=secret_param, string_value=secret.secret_arn)

        dbSubnetGroup = rds.CfnDBSubnetGroup(self, 'AuroraSubnetGroup',
            db_subnet_group_description='Subnet group to access aurora',
            subnet_ids=vpc.subnet_list,
            db_subnet_group_name='aurora-subnet-group'
        )

        self.aurora_serverless = rds.CfnDBCluster(self, 'Serverless DB',
            master_username=secret.secret_value_from_json("username").to_string(),
            master_user_password=secret.secret_value_from_json("password").to_string(),
            engine='aurora',
            engine_mode='serverless',
            enable_http_endpoint=True,
            db_subnet_group_name=dbSubnetGroup.db_subnet_group_name,
            port=3306,
            vpc_security_group_ids=[vpc.security_group.security_group_id],
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                min_capacity=1,
                max_capacity=2,
                seconds_until_auto_pause=300
            )
        )
        self.aurora_serverless.node.add_dependency(dbSubnetGroup)
        self.aurora_serverless.node.add_dependency(vpc.security_group)

        secret_attached = sm.CfnSecretTargetAttachment(
            self,
            id="secret_attachment",
            secret_id=secret.secret_arn,
            target_id=self.aurora_serverless.ref,
            target_type="AWS::RDS::DBCluster",
        )
        secret_attached.node.add_dependency(self.aurora_serverless)

        cluster_arn= "arn:aws:rds:{}:{}:cluster:{}".format(self.region,self.account,self.aurora_serverless.ref)
        ssm.StringParameter(self, "Database_Parameter", parameter_name=db_param, string_value= cluster_arn)
Example #3

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Exercise 9
        db_password_parameters = core.CfnParameter(
            self,
            "DBPassword",
            no_echo=True,
            description="New account and RDS password",
            min_length=1,
            max_length=41,
            constraint_description=
            "the password must be between 1 and 41 characters",
            default="DBPassword")

        # DBSecurityGroup:
        db_security_group = ec2.CfnSecurityGroup(
            self,
            "DBSecurityGroup",
            group_description="DB traffic",
            vpc_id=core.Fn.import_value("VPC"),
            security_group_ingress=[{
                "ipProtocol": "tcp",
                "fromPort": 3306,
                "toPort": 3306,
                "sourceSecurityGroupId": core.Fn.import_value("WebSecurityGroupOutput"),
            }, {
                "ipProtocol": "tcp",
                "fromPort": 3306,
                "toPort": 3306,
                "sourceSecurityGroupId": core.Fn.import_value("EdxProjectCloud9Sg"),
            }, {
                "ipProtocol": "tcp",
                "fromPort": 3306,
                "toPort": 3306,
                "sourceSecurityGroupId": core.Fn.import_value("LambdaSecurityGroupOutput"),
            }],
            security_group_egress=[{
                "ipProtocol": "tcp",
                "fromPort": 0,
                "toPort": 65535,
                "cidrIp": "0.0.0.0/0"
            }],
        )

        # MyDBSubnetGroup
        my_db_subnet_group = rds.CfnDBSubnetGroup(
            self,
            "DBSubnetGroup",
            db_subnet_group_description="MyDBSubnetGroup",
            subnet_ids=[
                core.Fn.import_value("PrivateSubnet1"),
                core.Fn.import_value("PrivateSubnet2")
            ])

        # RDSCluster
        rds_cluster = rds.CfnDBCluster(
            self,
            "RDSCluster",
            db_cluster_identifier="edx-photos-db",
            database_name="Photos",
            master_username="******",
            master_user_password=db_password_parameters.value_as_string,
            engine_mode="serverless",
            scaling_configuration={
                "autoPause": True,
                "maxCapacity": 4,
                "minCapacity": 2
            },
            engine="aurora",
            db_subnet_group_name=my_db_subnet_group.ref,
            vpc_security_group_ids=[db_security_group.ref])
        rds_cluster.apply_removal_policy(core.RemovalPolicy.DESTROY)

        # Output
        core.CfnOutput(self,
                       "MyDBEndpoint",
                       value=rds_cluster.attr_endpoint_address,
                       description="MyDB Endpoint",
                       export_name="MyDBEndpoint")
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Create a VPC
        myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

        # SG for ELB creation
        websitefrontendSG = ec2.SecurityGroup(
            self,
            'websitefrontendSG',
            vpc=myvpc,
            security_group_name='websitefrontendSG')
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(80))
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(443))

        # Create ALB in VPC
        alb = elb.ApplicationLoadBalancer(
            self,
            'websitefrontend-public',
            vpc=myvpc,
            load_balancer_name='websitefrontend-public',
            security_group=websitefrontendSG,
            internet_facing=True)

        # Add target group to ALB
        catalogtargetgroup = elb.ApplicationTargetGroup(
            self,
            'CatalogTargetGroup',
            port=80,
            vpc=myvpc,
            target_type=elb.TargetType.IP)

        if not vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(
                self,
                'alblistenerhttp',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=80)

        if vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(self,
                                                      'alblistenerhttp',
                                                      load_balancer=alb,
                                                      port=80)
            elb.ApplicationListenerRule(self,
                                        'httpredirectionrule',
                                        listener=alblistenerhttp,
                                        redirect_response=elb.RedirectResponse(
                                            status_code='HTTP_301',
                                            port='443',
                                            protocol='HTTPS'))
            # OPTIONAL - Add https listener to ALB & attach certificate
            alblistenerhttps = elb.ApplicationListener(
                self,
                'alblistenerhttps',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=443,
                certificate_arns=[vars.sslcert_arn])

            # OPTIONAL - Redirect HTTP to HTTPS
            alblistenerhttp.add_redirect_response(id='redirectionrule',
                                                  port='443',
                                                  status_code='HTTP_301',
                                                  protocol='HTTPS')

        if vars.customdomain:
            # OPTIONAL - Update DNS with ALB
            webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
                self,
                id='customdomain',
                hosted_zone_id=vars.hosted_zone_id,
                zone_name=vars.zone_name)
            webshop_root_record = r53.ARecord(
                self,
                'ALBAliasRecord',
                zone=webshopxyz_zone,
                target=r53.RecordTarget.from_alias(
                    alias.LoadBalancerTarget(alb)))

        # SG for ECS creation
        ECSSG = ec2.SecurityGroup(self,
                                  'ECSSecurityGroup',
                                  vpc=myvpc,
                                  security_group_name='ECS')
        ECSSG.add_ingress_rule(peer=websitefrontendSG,
                               connection=ec2.Port.tcp(80))

        # SG for MySQL creation
        MySQLSG = ec2.SecurityGroup(self,
                                    'DBSecurityGroup',
                                    vpc=myvpc,
                                    security_group_name='DB')
        MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

        # Create DB subnet group
        subnetlist = []
        for subnet in myvpc.private_subnets:
            subnetlist.append(subnet.subnet_id)
        subnetgr = rds.CfnDBSubnetGroup(
            self,
            'democlustersubnetgroup',
            db_subnet_group_name='democlustersubnetgroup',
            db_subnet_group_description='DemoCluster',
            subnet_ids=subnetlist)

        # Create secret db passwd
        secret = sm.SecretStringGenerator(
            exclude_characters="\"'@/\\",
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=40)
        dbpass = sm.Secret(self,
                           'democlusterpass',
                           secret_name='democlusterpass',
                           generate_secret_string=secret)

        # Create Aurora serverless MySQL instance
        dbcluster = rds.CfnDBCluster(
            self,
            'DemoCluster',
            engine='aurora',
            engine_mode='serverless',
            engine_version='5.6',
            db_cluster_identifier='DemoCluster',
            master_username=dbpass.secret_value_from_json(
                'username').to_string(),
            master_user_password=dbpass.secret_value_from_json(
                'password').to_string(),
            storage_encrypted=True,
            port=3306,
            vpc_security_group_ids=[MySQLSG.security_group_id],
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                max_capacity=4,
                min_capacity=1,
                seconds_until_auto_pause=300),
            db_subnet_group_name=subnetgr.db_subnet_group_name)
        dbcluster.add_override('DependsOn', 'democlustersubnetgroup')

        # Attach database to secret
        attach = sm.CfnSecretTargetAttachment(
            self,
            'RDSAttachment',
            secret_id=dbpass.secret_arn,
            target_id=dbcluster.ref,
            target_type='AWS::RDS::DBCluster')

        # Upload image into ECR repo
        ecrdemoimage = ecra.DockerImageAsset(self,
                                             'ecrdemoimage',
                                             directory='../',
                                             repository_name='demorepo',
                                             exclude=['cdk.out'])

        # Create ECS fargate cluster
        ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

        # Create task role for productsCatalogTask
        getsecretpolicystatement = iam.PolicyStatement(
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds"
            ],
            resources=[dbpass.secret_arn],
            effect=iam.Effect.ALLOW)
        getsecretpolicydocument = iam.PolicyDocument(
            statements=[getsecretpolicystatement])
        taskrole = iam.Role(
            self,
            'TaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name='TaskRoleforproductsCatalogTask',
            # inline_policies maps a policy name to a PolicyDocument
            inline_policies={'GetSecretPolicy': getsecretpolicydocument})

        # Create task definition
        taskdefinition = ecs.FargateTaskDefinition(self,
                                                   'productsCatalogTask',
                                                   cpu=1024,
                                                   memory_limit_mib=2048,
                                                   task_role=taskrole)

        # Add container to task definition
        productscatalogcontainer = taskdefinition.add_container(
            'productscatalogcontainer',
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=ecrdemoimage),
            environment={
                "region": vars.region,
                "secretname": "democlusterpass"
            })
        productscatalogcontainer.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80))

        # Create service and associate it with the cluster
        catalogservice = ecs.FargateService(
            self,
            'catalogservice',
            task_definition=taskdefinition,
            assign_public_ip=False,
            security_group=ECSSG,
            vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets),
            cluster=ecscluster,
            desired_count=2)

        # Add autoscaling to the service
        scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                       min_capacity=1)
        scaling.scale_on_cpu_utilization(
            'ScaleOnCPU',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(amount=1),
            scale_out_cooldown=core.Duration.seconds(amount=0))

        # Associate the fargate service with load balancer targetgroup
        catalogservice.attach_to_application_target_group(catalogtargetgroup)
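
The container only receives the region and the secret name as environment variables, so the application itself has to fetch the credentials; the task role above grants exactly the Secrets Manager read actions that requires. A sketch of what that lookup might look like inside the container (the application code is not part of the example):

    import json
    import os
    import boto3

    def get_db_credentials():
        client = boto3.client("secretsmanager", region_name=os.environ["region"])
        response = client.get_secret_value(SecretId=os.environ["secretname"])
        return json.loads(response["SecretString"])  # dict with "username" and "password" keys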
Example #5
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_master_user_name = "admin_user"

        secret = rds.DatabaseSecret(self,
                                    id="MasterUserSecret",
                                    username=db_master_user_name)

        subnet_ids = []
        for subnet in vpc.isolated_subnets:
            subnet_ids.append(subnet.subnet_id)

        subnet_group = rds.CfnDBSubnetGroup(
            self,
            id="AuroraServerlessSubnetGroup",
            db_subnet_group_description='Aurora Postgres Serverless Subnet Group',
            subnet_ids=subnet_ids,
            db_subnet_group_name='auroraserverlesssubnetgroup'  # needs to be all lowercase
        )

        db_cluster_name = "aurora-serverless-postgres-db"

        security_group = ec2.SecurityGroup(
            self,
            id="SecurityGroup",
            vpc=vpc,
            description="Allow ssh access to ec2 instances",
            allow_all_outbound=True)
        security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                        ec2.Port.tcp(5432),
                                        "allow psql through")

        self.db = rds.CfnDBCluster(
            self,
            id="AuroraServerlessDB",
            engine=rds.DatabaseClusterEngine.AURORA_POSTGRESQL.name,
            engine_mode="serverless",
            db_subnet_group_name=subnet_group.db_subnet_group_name,
            vpc_security_group_ids=[security_group.security_group_id],
            availability_zones=vpc.availability_zones,
            db_cluster_identifier=db_cluster_name,
            #db_cluster_parameter_group_name=
            database_name="slsdb",
            master_username=secret.secret_value_from_json(
                "username").to_string(),
            master_user_password=secret.secret_value_from_json(
                "password").to_string(),
            port=5432,
            deletion_protection=False,
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                min_capacity=2,
                max_capacity=16,
                seconds_until_auto_pause=300),
            enable_cloudwatch_logs_exports=[
                "error", "general", "slowquery", "audit"
            ],
            enable_http_endpoint=True
            #kms_key_id=
            #tags=
        )
        self.db.node.add_dependency(subnet_group)
        self.db.node.add_dependency(security_group)

        #secret_attached = secret.attach(target=self)
        #secret.add_target_attachment(id="secret_attachment", target=self.db)

        secret_attached = sm.CfnSecretTargetAttachment(
            self,
            id="secret_attachment",
            secret_id=secret.secret_arn,
            target_id=self.db.ref,
            target_type="AWS::RDS::DBCluster",
        )
        secret_attached.node.add_dependency(self.db)

        core.CfnOutput(
            self,
            id="StackName",
            value=self.stack_name,
            description="Stack Name",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:stack-name")

        core.CfnOutput(
            self,
            id="DatabaseName",
            value=self.db.database_name,
            description="Database Name",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:database-name")

        core.CfnOutput(
            self,
            id="DatabaseClusterArn",
            value=
            f"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.ref}",
            description="Database Cluster Arn",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:database-cluster-arn"
        )

        core.CfnOutput(
            self,
            id="DatabaseSecretArn",
            value=secret.secret_arn,
            description="Database Secret Arn",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:database-secret-arn"
        )

        core.CfnOutput(
            self,
            id="DatabaseClusterID",
            value=self.db.db_cluster_identifier,
            description="Database Cluster Id",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:database-cluster-id"
        )

        core.CfnOutput(
            self,
            id="AuroraEndpointAddress",
            value=self.db.attr_endpoint_address,
            description="Aurora Endpoint Address",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:aurora-endpoint-address"
        )

        core.CfnOutput(
            self,
            id="DatabaseMasterUserName",
            value=db_master_user_name,
            description="Database Master User Name",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:database-master-username"
        )
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        image_queue.grant_consume_messages(image_massage_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
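
The analyzer function is granted put_events on the custom bus and receives its name through the EVENT_BUS environment variable; the rule above matches on the function's ARN in the resources field. A sketch of how the analyzer's handler might publish a matching event (the handler itself is not shown in the example, and the source / detail-type strings are illustrative):

    import json
    import os
    import boto3

    def publish_analysis(labels, context):
        events = boto3.client("events")
        events.put_events(Entries=[{
            "EventBusName": os.environ["EVENT_BUS"],
            "Source": "ics.image-analyzer",                # illustrative
            "DetailType": "ImageAnalysisCompleted",        # illustrative
            "Resources": [context.invoked_function_arn],   # matched by the rule's resources filter
            "Detail": json.dumps({"labels": labels}),
        }])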
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### VPC and subnets
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr="172.20.0.0/24",
                      nat_gateways=0,
                      max_azs=2,
                      enable_dns_hostnames=True,
                      enable_dns_support=True,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              cidr_mask=26,
                              name="roundcube",
                              subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        ### Define an image and create instance
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Import role
        route53_role = iam.Role.from_role_arn(
            self, "role_id", "arn:aws:iam::585823398980:role/ec2WriteOvpnZone")

        instance = ec2.Instance(self,
                                "instance",
                                instance_type=ec2.InstanceType("t3a.nano"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=route53_role,
                                key_name="roundcube-key")

        instance.connections.allow_from(ec2.Peer.ipv4("109.255.202.235/32"),
                                        ec2.Port.tcp(22), "Allow ssh")
        instance.connections.allow_from(ec2.Peer.ipv4("109.255.202.235/32"),
                                        ec2.Port.tcp(443), "Allow HTTPS")

        ### Aurora cluster
        aurora_secret = sm.Secret(
            self,
            "secret",
            generate_secret_string=sm.SecretStringGenerator(
                generate_string_key="password",
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                password_length=30))

        # no l2 construct for serverless yet
        db_subnet_list = []
        for sn in vpc.public_subnets:
            db_subnet_list.append(sn.subnet_id)

        db_subnets = rds.CfnDBSubnetGroup(
            self,
            "db-subnet-group",
            db_subnet_group_description="subnet group",
            subnet_ids=db_subnet_list)

        rds.CfnDBCluster(
            self,
            "db-cluster",
            database_name="roundcube",
            db_cluster_identifier="serverless-cluster",
            master_username=aurora_secret.secret_value_from_json(
                'username').to_string(),
            master_user_password=aurora_secret.secret_value_from_json(
                'password').to_string(),
            engine="aurora",
            engine_mode="serverless",
            enable_http_endpoint=True,
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                min_capacity=1,
                max_capacity=1,
                seconds_until_auto_pause=900,
            ),
            deletion_protection=False,
            db_subnet_group_name=db_subnets.ref)

        # rds.DatabaseCluster(self, "aurora_cluster",
        #     engine                = rds.DatabaseClusterEngine.aurora_postgres(version = rds.AuroraPostgresEngineVersion.VER_11_7),
        #     default_database_name = "roundcube",
        #     #parameter_group       = pgroup,
        #     master_user           = rds.Login(username = "******"),
        #     removal_policy        = core.RemovalPolicy.DESTROY,
        #     instance_props        = rds.InstanceProps(
        #         vpc                   = vpc,
        #         instance_type         = ec2.InstanceType.of(
        #             ec2.InstanceClass.MEMORY5,
        #             ec2.InstanceSize.LARGE
        #         )
        #     )
        # )

        ### Resource group
        rg.CfnGroup(self,
                    "env-group",
                    name="roundcube",
                    resource_query=rg.CfnGroup.ResourceQueryProperty(
                        type="TAG_FILTERS_1_0",
                        query=rg.CfnGroup.QueryProperty(
                            resource_type_filters=["AWS::AllSupported"],
                            tag_filters=[
                                rg.CfnGroup.TagFilterProperty(
                                    key="resource-group", values=["roundcube"])
                            ])))
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # secrets manager for DB password
        self.db_secret = secrets.Secret(
            self,
            "DBSecret",
            secret_name=f"{scope.full_app_name}-db-secret",
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                exclude_punctuation=True,
                include_space=False,
                generate_string_key="password",
            ),
        )

        self.db_secret_arn = ssm.StringParameter(
            self,
            "DBSecretArn",
            parameter_name=f"{scope.full_app_name}-secret-arn",
            string_value=self.db_secret.secret_arn,
        )

        self.db_security_group = ec2.CfnSecurityGroup(
            self,
            "DBSecurityGroup",
            vpc_id=scope.vpc.vpc_id,
            group_description="DBSecurityGroup",
            security_group_ingress=[
                ec2.CfnSecurityGroup.IngressProperty(
                    ip_protocol="tcp",
                    to_port=5432,
                    from_port=5432,
                    source_security_group_id=scope.vpc.vpc_default_security_group,
                )
            ],
        )

        self.db_subnet_group = rds.CfnDBSubnetGroup(
            self,
            "CfnDBSubnetGroup",
            subnet_ids=scope.vpc.select_subnets(
                subnet_type=ec2.SubnetType.ISOLATED
            ).subnet_ids,
            db_subnet_group_description=f"{scope.full_app_name}-db-subnet-group",
        )

        self.db_config = {
            "engine_mode": "serverless",
            "engine": "aurora-postgresql",
            "engine_version": "10.7",
            # "port": 5432,
            "enable_http_endpoint": True,
            "master_username": self.db_secret.secret_value_from_json(
                "username"
            ).to_string(),
            "master_user_password": self.db_secret.secret_value_from_json(
                "password"
            ).to_string(),
            "vpc_security_group_ids": [
                self.db_security_group.get_att("GroupId").to_string()
            ],
            "db_subnet_group_name": self.db_subnet_group.ref,
        }

        self.rds_cluster = rds.CfnDBCluster(
            self, "DBCluster", **self.db_config
        )
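
This construct reads full_app_name and vpc off its parent scope, so the parent stack has to provide both before instantiating it. A minimal sketch of such a parent; the class names and the VPC layout are assumptions:

    class ApplicationStack(core.Stack):
        def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
            super().__init__(scope, id, **kwargs)
            self.full_app_name = "myapp-prod"
            self.vpc = ec2.Vpc(self, "Vpc",
                subnet_configuration=[
                    ec2.SubnetConfiguration(name="public", subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24),
                    ec2.SubnetConfiguration(name="isolated", subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=24),
                ])
            ServerlessRds(self, "rds")  # the construct defined above; class name assumed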
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, "VPC")
        db_master_user_name = os.getenv("DB_USERNAME", "admin_user")

        self.secret = rds.DatabaseSecret(
            self, id="MasterUserSecret", username=db_master_user_name
        )

        rds.CfnDBSubnetGroup(
            self,
            "rdsSubnetGroup",
            db_subnet_group_description="private subnets for rds",
            subnet_ids=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE
            ).subnet_ids,
        )
        db_name = os.getenv("DB_NAME", "anonfed")
        self.db = rds.CfnDBCluster(
            self,
            "auroraCluster",
            engine="aurora-mysql",
            engine_version="5.7.mysql_aurora.2.08.1",
            db_cluster_parameter_group_name="default.aurora-mysql5.7",
            # snapshot_identifier="<snapshot_arn>",  # your snapshot
            engine_mode="serverless",
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                min_capacity=1,
                max_capacity=4,
                seconds_until_auto_pause=300,
            ),
            db_subnet_group_name=core.Fn.ref("rdsSubnetGroup"),
            database_name=db_name,
            master_username=self.secret.secret_value_from_json("username").to_string(),
            master_user_password=self.secret.secret_value_from_json(
                "password"
            ).to_string(),
            enable_http_endpoint=True,
        )

        secret_attached = sm.CfnSecretTargetAttachment(
            self,
            id="secret_attachment",
            secret_id=self.secret.secret_arn,
            target_id=self.db.ref,
            target_type="AWS::RDS::DBCluster",
        )
        secret_attached.node.add_dependency(self.db)
        db_ref = f"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.ref}"
        migration = SchemaMigrationResource(
            self, "schemamigration", self.secret.secret_arn, db_name, db_ref
        )

        # Publish the custom resource output
        core.CfnOutput(
            self,
            "ResponseMessage",
            description="Database Migration",
            value=migration.response,
        )

        core.CfnOutput(
            self,
            id="DatabaseName",
            value=self.db.database_name,
            description="Database Name",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:database-name",
        )

        core.CfnOutput(
            self,
            id="DatabaseClusterArn",
            value=db_ref,
            description="Database Cluster Arn",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:database-cluster-arn",
        )

        core.CfnOutput(
            self,
            id="DatabaseSecretArn",
            value=self.secret.secret_arn,
            description="Database Secret Arn",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:database-secret-arn",
        )

        core.CfnOutput(
            self,
            id="AuroraEndpointAddress",
            value=self.db.attr_endpoint_address,
            description="Aurora Endpoint Address",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:aurora-endpoint-address",
        )

        core.CfnOutput(
            self,
            id="DatabaseMasterUserName",
            value=db_master_user_name,
            description="Database Master User Name",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:database-master-username",
        )