Code example #1
    def create_redis(self, vpc):
        subnetGroup = ec.CfnSubnetGroup(
            self,
            "RedisClusterPrivateSubnetGroup",
            cache_subnet_group_name="recommendations-redis-subnet-group",
            subnet_ids=[subnet.subnet_id for subnet in vpc.private_subnets],
            description="Redis subnet for recommendations"
        )

        redis_security_group = ec2.SecurityGroup(self, "redis-security-group", vpc=vpc)

        redis_connections = ec2.Connections(
            security_groups=[redis_security_group], default_port=ec2.Port.tcp(6379)
        )
        # NOTE: this opens port 6379 to 0.0.0.0/0; allowing access only from
        # the application's security group is safer.
        redis_connections.allow_from_any_ipv4(port_range=ec2.Port.tcp(6379))

        redis = ec.CfnCacheCluster(
            self,
            "RecommendationsRedisCacheCluster",
            engine="redis",
            cache_node_type="cache.t2.small",
            num_cache_nodes=1,
            cluster_name="redis-gw",
            vpc_security_group_ids=[redis_security_group.security_group_id],
            cache_subnet_group_name=subnetGroup.cache_subnet_group_name
        )
        redis.add_depends_on(subnetGroup)
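This snippet is an excerpt and omits its imports; a minimal sketch of the import block it appears to assume (CDK v1 module paths, with the `ec` alias inferred from the code above):

from aws_cdk import core
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticache as ec  # other examples alias this module as "elasticache" or "redis"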
Code example #2
    def create_redis(self, vpc, eks):
        # create subnet group
        subnet_group = elasticache.CfnSubnetGroup(
            self,
            "RedisClusterPrivateSubnetGroup",
            cache_subnet_group_name="redis-springboot-multiarch",
            subnet_ids=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnet_ids,
            description="springboot multiarch demo")
        # create security group
        security_group = ec2.SecurityGroup(
            self,
            "RedisSecurityGroup",
            vpc=vpc,
            description="Allow redis connection from eks",
            allow_all_outbound=True)
        eks.connections.allow_to(security_group, ec2.Port.tcp(6379))
        # create redis cluster
        redis = elasticache.CfnCacheCluster(
            self,
            "RedisCluster",
            engine="redis",
            cache_node_type="cache.t2.small",
            num_cache_nodes=1,
            cluster_name="redis-springboot-multiarch",
            vpc_security_group_ids=[security_group.security_group_id],
            cache_subnet_group_name=subnet_group.cache_subnet_group_name)
        redis.add_depends_on(subnet_group)

        return redis
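One way the caller might surface the cluster endpoint after invoking this method; the construct ids and outputs below are illustrative, not from the original project:

        redis = self.create_redis(vpc, eks)
        core.CfnOutput(self, "RedisEndpoint",
                       value=redis.attr_redis_endpoint_address)
        core.CfnOutput(self, "RedisPort",
                       value=redis.attr_redis_endpoint_port)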
Code example #3
File: cache_stack.py Project: jaydhulia/consoleme
    def __init__(self, scope: cdk.Construct, id: str, vpc: ec2.Vpc,
                 redis_sg: ec2.SecurityGroup, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Redis node

        subnet_ids = []
        for subnet in vpc.private_subnets:
            subnet_ids.append(subnet.subnet_id)

        redis_subnet_group = ec.CfnSubnetGroup(
            self,
            "RedisSubnetGroup",
            cache_subnet_group_name="redis-subnet-group",
            description="Subnet group for Redis Cluster",
            subnet_ids=subnet_ids,
        )

        redis = ec.CfnCacheCluster(
            self,
            "RedisCluster",
            cache_node_type="cache.t3.micro",
            engine="redis",
            engine_version="6.x",
            num_cache_nodes=1,
            auto_minor_version_upgrade=True,
            cache_subnet_group_name=redis_subnet_group.ref,
            vpc_security_group_ids=[redis_sg.security_group_id],
        )

        self.redis = redis
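A hypothetical wiring of this stack into a CDK app, assuming the class shown above is named CacheStack and that a separate network stack exposes the vpc and the Redis security group (NetworkStack and its attribute names are illustrative):

app = cdk.App()
network_stack = NetworkStack(app, "network")   # assumed to expose .vpc and .redis_sg
CacheStack(app, "cache", vpc=network_stack.vpc, redis_sg=network_stack.redis_sg)
app.synth()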
Code example #4
    def create_elasticache(self):
        cache = elasticache.CfnCacheCluster(
            self,
            guid('ELASTICACHE-'),
            cache_node_type="cache.t3.micro",
            engine="redis",
            num_cache_nodes=1,
            port=6379,
            # NOTE: az_mode applies to Memcached clusters only; it has no
            # effect on a single-node Redis cluster and can be omitted.
            az_mode="cross-az",
            # NOTE: vpc_security_group_ids expects security group IDs
            # ("sg-..."); the value below looks like a VPC ID.
            vpc_security_group_ids=['vpc-1657f27d'])
        return cache
Code example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create new VPC
        vpc = ec2.Vpc(
            self,
            "Default",
            max_azs=3,
            nat_gateways=1,
            cidr=ec2.Vpc.DEFAULT_CIDR_RANGE,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="Private-Subnet",
                                        subnet_type=ec2.SubnetType.PRIVATE,
                                        cidr_mask=19,
                                        reserved=None),
                ec2.SubnetConfiguration(name="Public-Subnet",
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=22,
                                        reserved=None),
                ec2.SubnetConfiguration(name="Isolated-Subnet",
                                        subnet_type=ec2.SubnetType.ISOLATED,
                                        cidr_mask=28,
                                        reserved=None)
            ])

        # Subnet group spanning the VPC's private subnets for Redis access
        subnet_group = elasticache.CfnSubnetGroup(
            scope=self,
            id="Testing-Subnet-Group",
            description="Group private subnets for redis access.",
            subnet_ids=[subnet.subnet_id for subnet in vpc.private_subnets],
            cache_subnet_group_name="test-int-private-subnets")
        # NOTE: no ingress rule is added to this security group, so clients
        # cannot reach the cluster until one is granted elsewhere.
        redis_security_group = ec2.SecurityGroup(scope=self,
                                                 id="TEMP-redis-SG",
                                                 vpc=vpc,
                                                 allow_all_outbound=False)

        redis_cluster = elasticache.CfnCacheCluster(
            scope=self,
            cache_node_type="cache.t2.micro",
            id="testmy-redis",
            engine="redis",
            num_cache_nodes=1,
            vpc_security_group_ids=[redis_security_group.security_group_id],
            cache_subnet_group_name=subnet_group.ref,
            cluster_name="testmy-redis")
Code example #6
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # NOTE: only vpc_id and availability_zones are imported here, so
        # vpc.private_subnets below resolves to an empty list; pass
        # private_subnet_ids to from_vpc_attributes for the subnet group to
        # receive any subnets.
        vpc = ec2.Vpc.from_vpc_attributes(
            self,
            'vpc',
            vpc_id=vpc.vpc_id,
            availability_zones=['us-east-1a', 'us-east-1b'])

        subnets = [ps.subnet_id for ps in vpc.private_subnets]

        sg = ec2.SecurityGroup(self,
                               'rdsSG',
                               vpc=vpc,
                               security_group_name='RedisSG',
                               description="SG for Redis Cache",
                               allow_all_outbound=True)
        # NOTE: despite the "SSH Access" description, this rule opens every
        # TCP port to 0.0.0.0/0; restricting it to port 6379 from trusted
        # security groups is safer for a Redis cache.
        sg.add_ingress_rule(ec2.Peer.any_ipv4(),
                            ec2.Port.all_tcp(),
                            description="SSH Access")

        subnet_group = redis.CfnSubnetGroup(
            self,
            'redis-sg',
            subnet_ids=subnets,
            description='subnet group for redis')

        redis_cluster = redis.CfnCacheCluster(
            self,
            'redis',
            cache_node_type='cache.t2.small',
            engine='redis',
            num_cache_nodes=1,
            cluster_name='msa-redis-dev',
            cache_subnet_group_name=subnet_group.ref,
            vpc_security_group_ids=[sg.security_group_id],
            auto_minor_version_upgrade=True)
        redis_cluster.add_depends_on(subnet_group)

        core.CfnOutput(self,
                       'redissg',
                       value=sg.security_group_id,
                       export_name='redis-sg-export')
Code example #7
File: storage.py Project: victoraguilarc/wise-cdk
def create_redis_cache(scope: core.Construct, stack_name: str, vpc: IVpc,
                       config: StackConfig):
    subnet_ids = vpc.select_subnets(
        subnet_type=ec2.SubnetType.PRIVATE).subnet_ids

    cache_subnet_group = elasticache.CfnSubnetGroup(
        scope,
        'cacheSubnetGroup',
        cache_subnet_group_name=f'{stack_name}-redis',
        description=stack_name,
        subnet_ids=subnet_ids,
    )
    cache_security_group = ec2.SecurityGroup(
        scope,
        'cacheSecurityGroup',
        vpc=vpc,
        allow_all_outbound=True,
        security_group_name=f'{stack_name}-redis',
        description=stack_name,
    )
    cache = elasticache.CfnCacheCluster(
        scope,
        'elasticache',
        cluster_name=stack_name,
        engine='redis',
        port=6379,
        cache_node_type=config.cache_node_type,
        num_cache_nodes=config.num_cache_nodes,
        cache_subnet_group_name=cache_subnet_group.cache_subnet_group_name,
        vpc_security_group_ids=[cache_security_group.security_group_id],
    )

    cache_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                          ec2.Port.tcp(6379),
                                          'Allow Cache Access')

    core.CfnOutput(scope=scope,
                   id='redisAddress',
                   value=cache.attr_redis_endpoint_address)

    return cache
Code example #8
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 redissq: ec2.SecurityGroup, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        subnets = [subnet.subnet_id for subnet in vpc.private_subnets]

        subnet_group = redis.CfnSubnetGroup(
            self,
            id=f'{env_name}-redis-subnet-group',
            subnet_ids=subnets,
            description='subnet group for redis')

        redis_cluster = redis.CfnCacheCluster(
            self,
            id=f'{env_name}-redis',
            cache_node_type='cache.t2.small',
            engine='redis',
            num_cache_nodes=1,
            cluster_name=f'{env_name}-cdk-redis',
            cache_subnet_group_name=subnet_group.ref,
            vpc_security_group_ids=[redissq.security_group_id],
            auto_minor_version_upgrade=True)
        # the redis cluster must wait for the subnet_group
        redis_cluster.add_depends_on(subnet_group)

        ssm.StringParameter(
            self,
            id=f'{env_name}-redis-endpoint',
            parameter_name='/' + env_name + '/redis-endpoint',
            string_value=redis_cluster.attr_redis_endpoint_address)

        ssm.StringParameter(
            self,
            id=f'{env_name}-redis-port',
            parameter_name='/' + env_name + '/redis-port',
            string_value=redis_cluster.attr_redis_endpoint_port)
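Because the endpoint and port are written to SSM Parameter Store, other stacks can read them back without a direct construct reference; a minimal sketch using the same aws_ssm module (the parameter names match the ones stored above):

        redis_endpoint = ssm.StringParameter.value_for_string_parameter(
            self, f'/{env_name}/redis-endpoint')
        redis_port = ssm.StringParameter.value_for_string_parameter(
            self, f'/{env_name}/redis-port')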
Code example #9
File: elasticache.py Project: anilvrathod1/bestCDK
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.elasticache_security_group = ec2.CfnSecurityGroup(
            self,
            "ElastiCacheSecurityGroup",
            vpc_id=scope.vpc.vpc_id,
            group_description="ElastiCacheSecurityGroup",
            security_group_ingress=[
                ec2.CfnSecurityGroup.IngressProperty(
                    ip_protocol="tcp",
                    to_port=6379,
                    from_port=6379,
                    source_security_group_id=scope.vpc.
                    vpc_default_security_group,
                )
            ],
        )

        self.elasticache_subnet_group = elasticache.CfnSubnetGroup(
            self,
            "CfnSubnetGroup",
            subnet_ids=scope.vpc.select_subnets(
                subnet_type=ec2.SubnetType.ISOLATED).subnet_ids,
            description="The subnet group for ElastiCache",
        )

        self.elasticache = elasticache.CfnCacheCluster(
            self,
            "ElastiCacheClusterRedis",
            cache_node_type="cache.t2.micro",
            engine="redis",
            num_cache_nodes=1,
            vpc_security_group_ids=[
                self.elasticache_security_group.get_att("GroupId").to_string()
            ],
            cache_subnet_group_name=self.elasticache_subnet_group.ref,  # noqa
        )
Code example #10
    def __init__(self, scope: core.Construct, id: str, vpc, redissg,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        subnets = [subnet.subnet_id for subnet in vpc.private_subnets]

        subnet_group = redis.CfnSubnetGroup(
            self,
            'redis-subnet-group',
            subnet_ids=subnets,
            description="subnet group for redis")

        redis_cluster = redis.CfnCacheCluster(
            self,
            'redis',
            cache_node_type='cache.t2.small',
            engine='redis',
            num_cache_nodes=1,
            cluster_name=prj_name + '-redis-' + env_name,
            cache_subnet_group_name=subnet_group.ref,
            vpc_security_group_ids=[redissg],
            auto_minor_version_upgrade=True)
        redis_cluster.add_depends_on(subnet_group)

        ssm.StringParameter(
            self,
            'redis-endpoint',
            parameter_name='/' + env_name + '/redis-endpoint',
            string_value=redis_cluster.attr_redis_endpoint_address)

        ssm.StringParameter(
            self,
            'redis-port',
            parameter_name='/' + env_name + '/redis-port',
            string_value=redis_cluster.attr_redis_endpoint_port)
Code example #11
    def __init__(self, scope: core.Construct, id: str, vpc: VpcStack,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnet_group = ecache.CfnSubnetGroup(
            self,
            "RedisClusterSG",
            subnet_ids=vpc.get_vpc_private_subnet_ids,
            description="Airflow Redis private subnet group",
        )

        redis = ecache.CfnCacheCluster(
            self,
            "AirflowRedis",
            engine="redis",
            port=6379,
            cache_node_type="cache.t2.small",
            num_cache_nodes=1,
            cluster_name="airflow-redis",
            vpc_security_group_ids=[vpc.redis_sg.security_group_id],
            cache_subnet_group_name=subnet_group.ref,
        )
        self._instance = redis
Code example #12
    def create_redis(self, vpc: ec2.IVpc):
        # NOTE: this selects PUBLIC subnets even though the subnet group below
        # is labelled "Private"; a cache is normally placed in private subnets.
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC)

        redis_security_group = ec2.SecurityGroup(self,
                                                 id='redis-security-group',
                                                 vpc=vpc)
        # NOTE: this allows Redis traffic from any IPv4 address; restrict the
        # peer to trusted security groups in production.
        redis_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                              ec2.Port.tcp(6379),
                                              "Incoming to Redis")

        redis_subnet_group = elasticache.CfnSubnetGroup(
            self,
            "RedisClusterPrivateSubnetGroup",
            cache_subnet_group_name="redis-subnet-group",
            description="Tubby Redis Subnet",
            subnet_ids=selection.subnet_ids)

        redis_parameter_group = elasticache.CfnParameterGroup(
            self,
            "RedisParameterGroup",
            description="Redis Params",
            cache_parameter_group_family="redis6.x",
            properties={},
        )

        redis = elasticache.CfnCacheCluster(
            self,
            "RedisCacheCluster",
            engine="redis",
            cache_node_type="cache.t2.micro",
            num_cache_nodes=1,
            cluster_name="startuptoolbag-redis",
            vpc_security_group_ids=[redis_security_group.security_group_id],
            cache_subnet_group_name=redis_subnet_group.cache_subnet_group_name,
            cache_parameter_group_name=redis_parameter_group.ref,
        )
        return redis
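The parameter group above is created with an empty properties dict; a hedged example of what it could contain, using standard redis6.x parameter names (the values are illustrative and not from the original project):

        redis_parameter_group = elasticache.CfnParameterGroup(
            self,
            "RedisParameterGroup",
            description="Redis Params",
            cache_parameter_group_family="redis6.x",
            properties={
                "maxmemory-policy": "allkeys-lru",  # evict least-recently-used keys under memory pressure
                "timeout": "300",                   # drop idle client connections after 300 seconds
            },
        )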
Code example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "OctemberVPC",
            max_azs=2,
            #      subnet_configuration=[{
            #          "cidrMask": 24,
            #          "name": "Public",
            #          "subnetType": aws_ec2.SubnetType.PUBLIC,
            #        },
            #        {
            #          "cidrMask": 24,
            #          "name": "Private",
            #          "subnetType": aws_ec2.SubnetType.PRIVATE
            #        },
            #        {
            #          "cidrMask": 28,
            #          "name": "Isolated",
            #          "subnetType": aws_ec2.SubnetType.ISOLATED,
            #          "reserved": True
            #        }
            #      ],
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        dynamo_db_endpoint = vpc.add_gateway_endpoint(
            "DynamoDbEndpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="octember-bizcard-{region}-{account}".format(
                region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

        api = apigw.RestApi(
            self,
            "BizcardImageUploader",
            rest_api_name="BizcardImageUploader",
            description="This service serves uploading bizcard images into s3.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            binary_media_types=["image/png", "image/jpg"],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        rest_api_role = aws_iam.Role(
            self,
            "ApiGatewayRoleForS3",
            role_name="ApiGatewayRoleForS3FullAccess",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_responses = [
            apigw.IntegrationResponse(
                status_code="200",
                #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
                # The response parameters from the backend response that API Gateway sends to the method response.
                # Use the destination as the key and the source as the value:
                #  - The destination must be an existing response parameter in the MethodResponse property.
                #  - The source must be an existing method request parameter or a static value.
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                }),
            apigw.IntegrationResponse(status_code="400",
                                      selection_pattern="4\d{2}"),
            apigw.IntegrationResponse(status_code="500",
                                      selection_pattern="5\d{2}")
        ]

        list_objects_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses)

        get_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path='/',
            options=list_objects_integration_options)

        api.root.add_method(
            "GET",
            get_s3_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={'method.request.header.Content-Type': False})

        get_s3_folder_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
            # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
            # The source must be an existing method request parameter or a static value.
            request_parameters={
                "integration.request.path.bucket": "method.request.path.folder"
            })

        get_s3_folder_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}",
            options=get_s3_folder_integration_options)

        s3_folder = api.root.add_resource('{folder}')
        s3_folder.add_method(
            "GET",
            get_s3_folder_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True
            })

        get_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        get_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}/{object}",
            options=get_s3_item_integration_options)

        s3_item = s3_folder.add_resource('{item}')
        s3_item.add_method(
            "GET",
            get_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        put_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[
                apigw.IntegrationResponse(status_code="200"),
                apigw.IntegrationResponse(status_code="400",
                                          selection_pattern="4\d{2}"),
                apigw.IntegrationResponse(status_code="500",
                                          selection_pattern="5\d{2}")
            ],
            request_parameters={
                "integration.request.header.Content-Type":
                "method.request.header.Content-Type",
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        put_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{bucket}/{object}",
            options=put_s3_item_integration_options)

        s3_item.add_method(
            "PUT",
            put_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        ddb_table = dynamodb.Table(
            self,
            "BizcardImageMetaInfoDdbTable",
            table_name="OctemberBizcardImgMeta",
            partition_key=dynamodb.Attribute(
                name="image_id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PROVISIONED,
            read_capacity=15,
            write_capacity=5)

        img_kinesis_stream = kinesis.Stream(
            self, "BizcardImagePath", stream_name="octember-bizcard-image")

        # create lambda function
        trigger_textract_lambda_fn = _lambda.Function(
            self,
            "TriggerTextExtractorFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="TriggerTextExtractorFromImage",
            handler="trigger_text_extract_from_s3_image.lambda_handler",
            description="Trigger to extract text from an image in S3",
            code=_lambda.Code.asset(
                "./src/main/python/TriggerTextExtractFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[ddb_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem", "dax:Describe*", "dax:List*",
                "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan",
                "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem",
                "dax:UpdateItem"
            ])

        trigger_textract_lambda_fn.add_to_role_policy(
            ddb_table_rw_policy_statement)
        trigger_textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[img_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                                   suffix=".jpg")
        s3_event_source = S3EventSource(s3_bucket,
                                        events=[s3.EventType.OBJECT_CREATED],
                                        filters=[s3_event_filter])
        trigger_textract_lambda_fn.add_event_source(s3_event_source)

        #XXX: https://github.com/aws/aws-cdk/issues/2240
        # To avoid to create extra Lambda Functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a
        # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props
        log_group = aws_logs.LogGroup(
            self,
            "TriggerTextractLogGroup",
            log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(trigger_textract_lambda_fn)

        text_kinesis_stream = kinesis.Stream(
            self, "BizcardTextData", stream_name="octember-bizcard-txt")

        textract_lambda_fn = _lambda.Function(
            self,
            "GetTextFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="GetTextFromImage",
            handler="get_text_from_s3_image.lambda_handler",
            description="extract text from an image in S3",
            code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["textract:*"]))

        img_kinesis_event_source = KinesisEventSource(
            img_kinesis_stream,
            batch_size=100,
            starting_position=_lambda.StartingPosition.LATEST)
        textract_lambda_fn.add_event_source(img_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "GetTextFromImageLogGroup",
            log_group_name="/aws/lambda/GetTextFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(textract_lambda_fn)

        sg_use_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard elasticsearch client',
            security_group_name='use-octember-bizcard-es')
        core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

        sg_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard elasticsearch',
            security_group_name='octember-bizcard-es')
        core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

        sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='octember-bizcard-es')
        sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='use-octember-bizcard-es')

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='octember-bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        bastion_host.instance.add_security_group(sg_use_bizcard_es)

        #XXX: aws cdk elastsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            'BizcardSearch',
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name="octember-bizcard",
            elasticsearch_version="7.9",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(service="es",
                                    resource="domain",
                                    resource_name="octember-bizcard/*")
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_bizcard_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es')

        s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name")

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   s3_lib_bucket_name)
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-es-lib.zip"))

        redis_lib_layer = _lambda.LayerVersion(
            self,
            "RedisLib",
            layer_version_name="redis-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-redis-lib.zip"))

        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToElasticSearch",
            handler="upsert_bizcard_to_es.lambda_handler",
            description="Upsert bizcard text into elasticsearch",
            code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_bizcard_es],
            vpc=vpc)

        text_kinesis_event_source = KinesisEventSource(
            text_kinesis_stream,
            batch_size=99,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToESLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToElasticSearch",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:DescribeStream",
                                        "kinesis:GetShardIterator",
                                        "kinesis:GetRecords"
                                    ]))

        firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "BizcardTextToS3",
            delivery_stream_name="octember-bizcard-txt-to-s3",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": text_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat": "GZIP",
                "prefix": "bizcard-text/",
                "roleArn": firehose_role.role_arn
            })

        sg_use_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache client',
            security_group_name='use-octember-bizcard-es-cache')
        core.Tags.of(sg_use_bizcard_es_cache).add(
            'Name', 'use-octember-bizcard-es-cache')

        sg_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache',
            security_group_name='octember-bizcard-es-cache')
        core.Tags.of(sg_bizcard_es_cache).add('Name',
                                              'octember-bizcard-es-cache')

        sg_bizcard_es_cache.add_ingress_rule(
            peer=sg_use_bizcard_es_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-es-cache')

        es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "QueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-es-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-es-cache')

        es_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardSearchQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-es-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use referece for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-es-cache',
            vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id])

        #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
        es_query_cache.add_depends_on(es_query_cache_subnet_group)

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        bizcard_search_lambda_fn = _lambda.Function(
            self,
            "BizcardSearchServer",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardSearchProxy",
            handler="es_search_bizcard.lambda_handler",
            description="Proxy server to search bizcard text",
            code=_lambda.Code.asset("./src/main/python/SearchBizcard"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard',
                'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[es_lib_layer, redis_lib_layer],
            security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        search_api = apigw.LambdaRestApi(
            self,
            "BizcardSearchAPI",
            handler=bizcard_search_lambda_fn,
            proxy=False,
            rest_api_name="BizcardSearch",
            description="This service serves searching bizcard text.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_search = search_api.root.add_resource('search')
        bizcard_search.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sg_use_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db client',
            security_group_name='use-octember-bizcard-neptune')
        core.Tags.of(sg_use_bizcard_graph_db).add(
            'Name', 'use-octember-bizcard-neptune')

        sg_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db',
            security_group_name='octember-bizcard-neptune')
        core.Tags.of(sg_bizcard_graph_db).add('Name',
                                              'octember-bizcard-neptune')

        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='octember-bizcard-neptune')
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_use_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='use-octember-bizcard-neptune')

        bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup(
            self,
            "NeptuneSubnetGroup",
            db_subnet_group_description=
            "subnet group for octember-bizcard-neptune",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            db_subnet_group_name='octember-bizcard-neptune')

        bizcard_graph_db = aws_neptune.CfnDBCluster(
            self,
            "BizcardGraphDB",
            availability_zones=vpc.availability_zones,
            db_subnet_group_name=bizcard_graph_db_subnet_group.
            db_subnet_group_name,
            db_cluster_identifier="octember-bizcard",
            backup_retention_period=1,
            preferred_backup_window="08:45-09:15",
            preferred_maintenance_window="sun:18:00-sun:18:30",
            vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id])
        bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group)

        bizcard_graph_db_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[0],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_instance.add_depends_on(bizcard_graph_db)

        bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBReplicaInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[-1],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard-replica",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db)
        bizcard_graph_db_replica_instance.add_depends_on(
            bizcard_graph_db_instance)

        gremlinpython_lib_layer = _lambda.LayerVersion(
            self,
            "GremlinPythonLib",
            layer_version_name="gremlinpython-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(
                s3_lib_bucket, "var/octember-gremlinpython-lib.zip"))

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        upsert_to_neptune_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToGraphDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToNeptune",
            handler="upsert_bizcard_to_graph_db.lambda_handler",
            description="Upsert bizcard into neptune",
            code=_lambda.Code.asset(
                "./src/main/python/UpsertBizcardToGraphDB"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port
            },
            timeout=core.Duration.minutes(5),
            layers=[gremlinpython_lib_layer],
            security_groups=[sg_use_bizcard_graph_db],
            vpc=vpc)

        upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToGraphDBLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToNeptune",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_neptune_lambda_fn)

        sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache client',
            security_group_name='use-octember-bizcard-neptune-cache')
        core.Tags.of(sg_use_bizcard_neptune_cache).add(
            'Name', 'use-octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache',
            security_group_name='octember-bizcard-neptune-cache')
        core.Tags.of(sg_bizcard_neptune_cache).add(
            'Name', 'octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache.add_ingress_rule(
            peer=sg_use_bizcard_neptune_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-neptune-cache')

        recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "RecommQueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-neptune-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-neptune-cache')

        recomm_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardRecommQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-neptune-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use referece for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-neptune-cache',
            vpc_security_group_ids=[
                sg_bizcard_neptune_cache.security_group_id
            ])

        recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group)

        bizcard_recomm_lambda_fn = _lambda.Function(
            self,
            "BizcardRecommender",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardRecommender",
            handler="neptune_recommend_bizcard.lambda_handler",
            description="This service serves PYMK(People You May Know).",
            code=_lambda.Code.asset("./src/main/python/RecommendBizcard"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port,
                'ELASTICACHE_HOST':
                recomm_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[gremlinpython_lib_layer, redis_lib_layer],
            security_groups=[
                sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache
            ],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        recomm_api = apigw.LambdaRestApi(
            self,
            "BizcardRecommendAPI",
            handler=bizcard_recomm_lambda_fn,
            proxy=False,
            rest_api_name="BizcardRecommend",
            description="This service serves PYMK(People You May Know).",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_recomm = recomm_api.root.add_resource('pymk')
        bizcard_recomm.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:s3:::aws-neptune-notebook",
                        "arn:aws:s3:::aws-neptune-notebook/*"
                    ],
                    "actions": ["s3:GetObject", "s3:ListBucket"]
                }))

        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*".
                        format(region=core.Aws.REGION,
                               account=core.Aws.ACCOUNT_ID,
                               cluster_id=bizcard_graph_db.
                               attr_cluster_resource_id)
                    ],
                    "actions": ["neptune-db:connect"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookForNeptuneWorkbenchRole',
            role_name='AWSNeptuneNotebookRole-OctemberBizcard',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={
                'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
            })

        neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

        neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(neptune_wb_lifecycle_content))

        neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'NeptuneWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
            on_start=[neptune_wb_lifecycle_config_prop])

        neptune_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'NeptuneWorkbench',
            instance_type='ml.t2.medium',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=neptune_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
            subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
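The recommender Lambda above only receives the cache and Neptune endpoints as environment variables; the handler code itself is not part of this snippet. Below is a minimal cache-aside sketch of how such a handler might use ELASTICACHE_HOST, assuming the redis client from redis_lib_layer; the key format, TTL, and the query_neptune helper are illustrative assumptions, not part of the original source.

import json
import os

import redis  # assumed to be provided by redis_lib_layer

_cache = redis.Redis(host=os.environ['ELASTICACHE_HOST'], port=6379)

def get_recommendations(user_id):
    """Return PYMK results, serving from ElastiCache when possible (cache-aside)."""
    cache_key = 'pymk:{}'.format(user_id)
    cached = _cache.get(cache_key)
    if cached is not None:
        return json.loads(cached)
    recommendations = query_neptune(user_id)  # hypothetical Gremlin query helper, not shown here
    _cache.setex(cache_key, 300, json.dumps(recommendations))  # cache for 5 minutes
    return recommendations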
Code example #14
0
    def create_redis(stack, vpc, is_group=False):
        print(vpc.private_subnets)
        subnetGroup = ec.CfnSubnetGroup(
            stack,
            "RedisClusterPrivateSubnetGroup-test",
            cache_subnet_group_name="recommendations-redis-subnet-group-test",
            description="Redis subnet for recommendations",
            subnet_ids=[subnet.subnet_id for subnet in vpc.private_subnets]
        )

        redis_security_group = ec2.SecurityGroup(
            stack, 
            "redis-security-group-test", 
            vpc=vpc
        )

        redis_connections = ec2.Connections(
            security_groups=[redis_security_group], 
            default_port=ec2.Port.tcp(6379)
        )
        redis_connections.allow_from_any_ipv4(port_range=ec2.Port.tcp(6379))


        if is_group:
            #group
            redis = ec.CfnReplicationGroup(
                stack,
                "RecommendationsRedisCacheCluster",
                engine="redis",
                cache_node_type="cache.t2.small",
                replicas_per_node_group=1,
                num_node_groups=3,
                replication_group_description="redis-gw-test",
                automatic_failover_enabled=True,
                security_group_ids=[redis_security_group.security_group_id],
                cache_subnet_group_name=subnetGroup.cache_subnet_group_name
            )
        else:
            # one node
            redis = ec.CfnCacheCluster(
                stack,
                "RecommendationsRedisCacheCluster",
                engine="redis",
                cache_node_type="cache.t2.small",
                num_cache_nodes=1,
                cluster_name="redis-gw-test",
                vpc_security_group_ids=[redis_security_group.security_group_id],
                cache_subnet_group_name=subnetGroup.cache_subnet_group_name
            )


        # no python sample, this is nodejs sample for group mode
        '''
        const redisReplication = new CfnReplicationGroup(
            this,
            `RedisReplicaGroup`,
            {
                engine: "redis",
                cacheNodeType: "cache.m5.xlarge",
                replicasPerNodeGroup: 1,
                numNodeGroups: 3,
                automaticFailoverEnabled: true,
                autoMinorVersionUpgrade: true,
                replicationGroupDescription: "cluster redis di produzione",
                cacheSubnetGroupName: redisSubnetGroup.cacheSubnetGroupName
            }
            );
        '''
        
        redis.add_depends_on(subnetGroup)

        if is_group:
            return redis.attr_primary_end_point_address, redis.attr_primary_end_point_port
        else:
            return redis.attr_redis_endpoint_address, redis.attr_redis_endpoint_port
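A minimal sketch of how create_redis above might be wired into a stack; the stack name, the VPC construct, and the core/ec2 aliases are assumptions.

class RecommendationsStack(core.Stack):
    def __init__(self, scope, id, **kwargs):
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc(self, "RecommendationsVpc", max_azs=2)
        # single-node mode; pass is_group=True to build the replication group instead
        redis_host, redis_port = create_redis(self, vpc, is_group=False)
        core.CfnOutput(self, "RedisEndpoint", value=redis_host)
        core.CfnOutput(self, "RedisPort", value=redis_port)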
Code example #15
0
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # -- VPC
        vpc = ec2.Vpc(self, "vpc_airflow")
        # ecr
        ecr_repo = ecr.Repository.from_repository_name(self,
                                                       "ecr_repo_airflow",
                                                       "airflow")
        # rds
        sg_airflow_backend_db = ec2.SecurityGroup(
            self,
            "sg_airflow_backend_database",
            vpc=vpc,
            description="Airflow backend database",
            security_group_name="sg_airflow_backend_database",
        )
        db = rds.DatabaseInstance(
            self,
            "rds_airfow_backend",
            master_username="******",
            master_user_password=core.SecretValue.plain_text("postgres"),
            database_name="airflow",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_11_8),
            vpc=vpc,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3,
                ec2.InstanceSize.MICRO,
            ),
            instance_identifier="airflow-backend",
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False,
            security_groups=[sg_airflow_backend_db],
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
        )
        # -- ElasticCache Redis
        sg_redis = ec2.SecurityGroup(
            self,
            "sg_redis",
            vpc=vpc,
            description="Airflow redis",
            security_group_name="sg_redis",
        )
        redis_subnet_group = ec.CfnSubnetGroup(
            self,
            "airflow-redis-subnet-group",
            description="For Airflow Task Queue",
            subnet_ids=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name="airflow-redis-task-queue",
        )
        redis = ec.CfnCacheCluster(
            self,
            "redis",
            cluster_name="airflow-redis",
            cache_node_type="cache.t2.micro",
            engine="redis",
            num_cache_nodes=1,
            auto_minor_version_upgrade=True,
            engine_version="5.0.6",
            port=REDIS_PORT,
            cache_subnet_group_name=redis_subnet_group.ref,
            vpc_security_group_ids=[sg_redis.security_group_id],
        )
        # ECS cluster
        cluster = ecs.Cluster(
            self,
            "ecs_airflow",
            cluster_name="airflow",
            vpc=vpc,
            container_insights=True,
        )
        # scheduler
        scheduler_task_role = iam.Role(
            self,
            "iam_role_scheduler",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS Scheduler service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-scheduler-task",
        )
        scheduler_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_scheduler",
            cpu=512,
            memory_limit_mib=2048,
            task_role=scheduler_task_role,
        )
        scheduler_task.add_container(
            "scheduler",
            command=["scheduler"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="scheduler",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-scheduler",
                    log_group_name="ecs/airflow/scheduler",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        )
        sg_airflow_scheduler = ec2.SecurityGroup(
            self,
            "sg_airflow_scheduler",
            vpc=vpc,
            description="Airflow Scheduler service",
            security_group_name="sg_airflow_scheduler",
        )
        sg_redis.add_ingress_rule(
            peer=sg_airflow_scheduler,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from scheduler",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from scheduler service",
        )
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_scheduler,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from home",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="home",
        )
        scheduler_service = ecs.FargateService(
            self,
            "ecs_service_scheduler",
            cluster=cluster,
            task_definition=scheduler_task,
            desired_count=1,
            security_groups=[sg_airflow_scheduler],
            service_name="scheduler",
        )
        # flower
        flower_task_role = iam.Role(
            self,
            "iam_role_flower",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS Flower service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-flower-task",
        )
        flower_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_flower",
            cpu=512,
            memory_limit_mib=1024,
            task_role=flower_task_role,
        )
        flower_task.add_container(
            "flower",
            command=["flower"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="flower",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-flower",
                    log_group_name="ecs/airflow/flower",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        ).add_port_mappings(
            ecs.PortMapping(
                container_port=FLOWER_PORT,
                host_port=FLOWER_PORT,
                protocol=ecs.Protocol.TCP,
            ))
        sg_airflow_flower = ec2.SecurityGroup(
            self,
            "sg_airflow_flower",
            vpc=vpc,
            description="Airflow Flower service",
            security_group_name="sg_airflow_flower",
        )
        sg_airflow_flower.add_ingress_rule(
            peer=ec2.Peer.ipv4("115.66.217.45/32"),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from homr",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="from home",
        )
        sg_redis.add_ingress_rule(
            peer=sg_airflow_flower,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from flower",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from flower",
        )
        flower_service = ecs.FargateService(
            self,
            "ecs_service_flower",
            cluster=cluster,
            task_definition=flower_task,
            desired_count=1,
            security_groups=[sg_airflow_flower],
            service_name="flower",
        )
        # worker
        worker_task_role = iam.Role(
            self,
            "iam_role_worker",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS worker service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-worker-task",
        )
        worker_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_worker",
            cpu=1024,
            memory_limit_mib=3072,
            task_role=worker_task_role,
        )
        worker_task.add_container(
            "worker",
            command=["worker"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="worker",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-worker",
                    log_group_name="ecs/airflow/worker",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        )
        sg_airflow_worker = ec2.SecurityGroup(
            self,
            "sg_airflow_worker",
            vpc=vpc,
            description="Airflow worker service",
            security_group_name="sg_airflow_worker",
        )
        sg_redis.add_ingress_rule(
            peer=sg_airflow_worker,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from worker",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from worker service",
        )
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_worker,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from worker",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="From worker",
        )
        worker_service = ecs.FargateService(
            self,
            "ecs_service_worker",
            cluster=cluster,
            task_definition=worker_task,
            desired_count=1,
            security_groups=[sg_airflow_worker],
            service_name="worker",
        )
        # web server
        web_server_task_role = iam.Role(
            self,
            "iam_role_web_server",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS web server service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-web-server-task",
        )
        web_server_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_web_server",
            cpu=512,
            memory_limit_mib=1024,
            task_role=web_server_task_role,
        )
        web_server_task.add_container(
            "web_server",
            command=["webserver"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="web_server",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-web-server",
                    log_group_name="ecs/airflow/web-server",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        ).add_port_mappings(
            ecs.PortMapping(
                container_port=WEB_SERVER_PORT,
                host_port=WEB_SERVER_PORT,
                protocol=ecs.Protocol.TCP,
            ))
        sg_airflow_web_server = ec2.SecurityGroup(
            self,
            "sg_airflow_web_server",
            vpc=vpc,
            description="Airflow web server service",
            security_group_name="sg_airflow_web_server",
        )
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_web_server,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From web server",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="From web server",
        )
        sg_redis.add_ingress_rule(
            peer=sg_airflow_web_server,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from web server",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from web server",
        )
        web_server_service = ecs.FargateService(
            self,
            "ecs_service_web_server",
            cluster=cluster,
            task_definition=web_server_task,
            desired_count=1,
            security_groups=[sg_airflow_web_server],
            service_name="web_server",
        )
        # Load balancer
        sg_airflow_alb = ec2.SecurityGroup(
            self,
            "sg_airflow_alb",
            vpc=vpc,
            description="Airflow ALB",
            security_group_name="sg_airflow_alb",
        )
        # ALB -> web server
        sg_airflow_web_server.add_ingress_rule(
            peer=sg_airflow_alb,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From ALB",
                from_port=WEB_SERVER_PORT,
                to_port=WEB_SERVER_PORT,
            ),
            description="From ALB",
        )
        # ALB -> flower
        sg_airflow_flower.add_ingress_rule(
            peer=sg_airflow_alb,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From ALB",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="From ALB",
        )
        # Home -> ALB
        sg_airflow_alb.add_ingress_rule(
            peer=ec2.Peer.ipv4(MY_IP_CIDR),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From Home",
                from_port=ALB_PORT,
                to_port=ALB_PORT,
            ),
            description="From Home",
        )
        # Home -> ALB
        sg_airflow_alb.add_ingress_rule(
            peer=ec2.Peer.ipv4(MY_IP_CIDR),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From Home",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="From Home",
        )
        alb = elb.ApplicationLoadBalancer(
            self,
            "alb_airflow",
            internet_facing=True,
            security_group=sg_airflow_alb,
            vpc=vpc,
            load_balancer_name="alb-airflow",
        )
        listener1 = alb.add_listener(
            "alb_airflow_listener1",
            open=False,
            port=ALB_PORT,
            protocol=elb.ApplicationProtocol.HTTP,
            default_target_groups=[
                elb.ApplicationTargetGroup(
                    self,
                    "alb_airflow_target_group_web_server",
                    port=WEB_SERVER_PORT,
                    protocol=elb.ApplicationProtocol.HTTP,
                    target_group_name="alb-tg-airflow-web-server",
                    targets=[web_server_service],
                    vpc=vpc,
                )
            ],
        )
        alb.add_listener(
            "alb_airflow_listener2",
            open=False,
            port=FLOWER_PORT,
            protocol=elb.ApplicationProtocol.HTTP,
            default_target_groups=[
                elb.ApplicationTargetGroup(
                    self,
                    "alb_airflow_target_group_flower",
                    port=FLOWER_PORT,
                    protocol=elb.ApplicationProtocol.HTTP,
                    target_group_name="alb-tg-aiflow-flower",
                    targets=[flower_service],
                    vpc=vpc,
                )
            ],
        )
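The Airflow stack above references several module-level constants (REDIS_PORT, POSTGRES_PORT, WEB_SERVER_PORT, FLOWER_PORT, ALB_PORT, MY_IP_CIDR) that are defined outside the snippet. A sketch of plausible definitions follows; the concrete values are assumptions based on the usual defaults.

REDIS_PORT = 6379        # ElastiCache Redis default
POSTGRES_PORT = 5432     # RDS PostgreSQL default
WEB_SERVER_PORT = 8080   # Airflow webserver default
FLOWER_PORT = 5555       # Celery Flower default
ALB_PORT = 80
MY_IP_CIDR = "203.0.113.10/32"  # placeholder CIDR for the operator's own IP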
Code example #16
0
File: app.py Project: nvidian7/aws-cdk-leaderboard
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = _ec2.Vpc.from_lookup(
            self, id="vpc", vpc_id=environment.AWS_VPC_ID)
        subnet_group = _elasticache.CfnSubnetGroup(self,
                                                   id="subnet-group",
                                                   description="The redis subnet group",
                                                   subnet_ids=list(map(lambda s: s.subnet_id, vpc.private_subnets)))

        security_group = _ec2.SecurityGroup.from_security_group_id(
            self, id="Security Group", security_group_id=environment.AWS_SECURITY_GROUP_ID)

        # define s3 bucket for redis data backup
        # do not use RemovalPolicy.DESTROY on production, use RemovalPolicy.RETAIN instead
        # bucket: s3.Bucket = _s3.Bucket(self, "RankingDataBackup", removal_policy=core.RemovalPolicy.DESTROY)

        # define elasticache for ranking
        elasticache = _elasticache.CfnCacheCluster(
            self,
            id="LeaderBoardElasticache",
            cache_node_type="cache.t2.micro",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.6",
            cache_parameter_group_name="default.redis5.0",
            cache_subnet_group_name=subnet_group.ref,  # the subnet group above has no explicit name, so reference it via .ref
            vpc_security_group_ids=[security_group.security_group_id])

        elasticache.apply_removal_policy(core.RemovalPolicy.DESTROY)
        elasticache.add_depends_on(subnet_group)

        elasticache_host = elasticache.attr_redis_endpoint_address
        elasticache_port = elasticache.attr_redis_endpoint_port

        lambda_function = _lambda.Function(self, "LeaderBoardFunction",
                                           handler='lambda_handler.handler',
                                           runtime=_lambda.Runtime.PYTHON_3_8,
                                           code=_lambda.Code.from_asset('lambda'),
                                           memory_size=128,
                                           vpc=vpc,
                                           security_group=security_group,
                                           timeout=core.Duration.seconds(10),
                                           log_retention=_logs.RetentionDays.ONE_WEEK,
                                           layers=[self.create_dependencies_layer("leaderboard", "lambda")])

        lambda_function.add_environment("REDIS_HOST", elasticache_host)
        lambda_function.add_environment("REDIS_PORT", elasticache_port)
        lambda_function.add_environment("ADMIN_SECRET_TOKEN", environment.ADMIN_SECRET_TOKEN)
        lambda_function.add_environment("DEFAULT_FETCH_COUNT", str(environment.DEFAULT_FETCH_COUNT))
        lambda_function.add_environment("MAX_FETCH_COUNT", str(environment.MAX_FETCH_COUNT))

        base_api = _apigw.RestApi(self, 'LeaderBoardApi', rest_api_name='LeaderBoardApi')

        root_api = base_api.root
        entity_lambda_integration = _apigw.LambdaIntegration(lambda_function, proxy=True, integration_responses=[
            {
                'statusCode': '200',
                "responseParameters": {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }
        ])

        root_api.add_method('GET', entity_lambda_integration,
                            method_responses=[{
                                'statusCode': '200',
                                'responseParameters': {
                                    'method.response.header.Access-Control-Allow-Origin': True,
                                }
                            }])

        entity = root_api.add_resource("{proxy+}")
        entity.add_method("ANY", _apigw.LambdaIntegration(lambda_function))

        self.add_cors_options(root_api)
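The leaderboard stack above calls two helpers that are not included in the snippet, create_dependencies_layer and add_cors_options. A rough sketch of the former, following the common pip-into-asset-directory pattern, is shown below; the build paths and the layer id are assumptions.

import subprocess

def create_dependencies_layer(self, project_name, function_name) -> _lambda.LayerVersion:
    requirements_file = '{}/requirements.txt'.format(function_name)
    output_dir = '.build/{}'.format(function_name)
    # install the function's dependencies into the directory layout Lambda layers expect
    subprocess.check_call(
        'pip install -r {} -t {}/python'.format(requirements_file, output_dir).split())
    return _lambda.LayerVersion(
        self,
        '{}-dependencies'.format(project_name),
        code=_lambda.Code.from_asset(output_dir))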
Code example #17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = aws_ec2.Vpc(self, 'RedisVPC', max_azs=2)

        sg_use_elasticache = aws_ec2.SecurityGroup(
            self,
            'RedisClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for redis client',
            security_group_name='use-default-redis')
        core.Tags.of(sg_use_elasticache).add('Name', 'use-default-redis')

        sg_elasticache = aws_ec2.SecurityGroup(
            self,
            'RedisServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for redis',
            security_group_name='default-redis-server')
        core.Tags.of(sg_elasticache).add('Name', 'redis-server')

        sg_elasticache.add_ingress_rule(peer=sg_use_elasticache,
                                        connection=aws_ec2.Port.tcp(6379),
                                        description='use-default-redis')

        elasticache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            'RedisSubnetGroup',
            description='subnet group for redis',
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='default-redis')

        redis_param_group = aws_elasticache.CfnParameterGroup(
            self,
            'RedisParamGroup',
            cache_parameter_group_family='redis5.0',
            description='parameter group for redis5.0',
            properties={
                'databases': '256',  # database: 16 (default)
                'tcp-keepalive': '0',  #tcp-keepalive: 300 (default)
                'maxmemory-policy':
                'volatile-ttl'  #maxmemory-policy: volatile-lru (default)
            })

        redis_primary_only = aws_elasticache.CfnCacheCluster(
            self,
            'RedisCache',
            cache_node_type='cache.t3.small',
            #XXX: NumCacheNodes should be 1 if engine is redis
            num_cache_nodes=1,
            engine='redis',
            engine_version='5.0.5',
            auto_minor_version_upgrade=False,
            cluster_name='elasticache-redis',
            snapshot_retention_limit=3,
            snapshot_window='17:00-19:00',
            preferred_maintenance_window='mon:19:00-mon:20:30',
            #XXX: Elasticache.CfnParameterGroup cannot be initialized with a parameter_group_name
            # https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/484
            # https://github.com/aws/aws-cdk/issues/8180
            cache_parameter_group_name=redis_param_group.ref,
            cache_subnet_group_name=elasticache_subnet_group.
            cache_subnet_group_name,
            vpc_security_group_ids=[sg_elasticache.security_group_id],
            tags=[
                core.CfnTag(key='Name', value='redis-primary-only'),
                core.CfnTag(key='desc', value='primary only redis')
            ])
        #XXX: Subnet group must exist before ElastiCache is created
        redis_primary_only.add_depends_on(elasticache_subnet_group)

        redis_with_replicas = aws_elasticache.CfnReplicationGroup(
            self,
            'RedisCacheWithReplicas',
            cache_node_type='cache.t3.small',
            engine='redis',
            engine_version='5.0.5',
            snapshot_retention_limit=3,
            snapshot_window='19:00-21:00',
            preferred_maintenance_window='mon:21:00-mon:22:30',
            automatic_failover_enabled=True,
            auto_minor_version_upgrade=False,
            multi_az_enabled=True,
            replication_group_description='redis with replicas',
            replicas_per_node_group=1,
            cache_parameter_group_name=redis_param_group.ref,
            cache_subnet_group_name=elasticache_subnet_group.
            cache_subnet_group_name,
            security_group_ids=[sg_elasticache.security_group_id],
            tags=[
                core.CfnTag(key='Name', value='redis-with-replicas'),
                core.CfnTag(key='desc', value='primary-replica redis')
            ])
        redis_with_replicas.add_depends_on(elasticache_subnet_group)

        redis_cluster_param_group = aws_elasticache.CfnParameterGroup(
            self,
            'RedisClusterParamGroup',
            cache_parameter_group_family='redis5.0',
            description='parameter group for redis5.0 cluster',
            properties={
                'cluster-enabled': 'yes',  # Enable cluster mode
                'tcp-keepalive': '0',  #tcp-keepalive: 300 (default)
                'maxmemory-policy':
                'volatile-ttl'  #maxmemory-policy: volatile-lru (default)
            })

        redis_cluster = aws_elasticache.CfnReplicationGroup(
            self,
            'RedisCluster',
            cache_node_type='cache.t3.small',
            engine='redis',
            engine_version='5.0.5',
            snapshot_retention_limit=3,
            snapshot_window='19:00-21:00',
            preferred_maintenance_window='mon:21:00-mon:22:30',
            automatic_failover_enabled=True,
            auto_minor_version_upgrade=False,
            #XXX: Each Node Group needs to have at least one replica for Multi-AZ enabled Replication Group
            multi_az_enabled=False,
            replication_group_description='redis5.0 cluster on',
            num_node_groups=3,
            cache_parameter_group_name=redis_cluster_param_group.ref,
            cache_subnet_group_name=elasticache_subnet_group.
            cache_subnet_group_name,
            security_group_ids=[sg_elasticache.security_group_id],
            tags=[
                core.CfnTag(key='Name', value='redis-cluster'),
                core.CfnTag(key='desc', value='primary-replica redis')
            ])
        redis_cluster.add_depends_on(elasticache_subnet_group)
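The three clusters above expose different endpoint attributes: redis_primary_only publishes attr_redis_endpoint_address, redis_with_replicas publishes attr_primary_end_point_address, and the cluster-mode-enabled redis_cluster publishes attr_configuration_end_point_address. A client-side sketch of connecting to the first and the last; redis-py and the environment variable names are assumptions, not part of the stack.

import os

import redis
from redis.cluster import RedisCluster

# primary-only node: use redis_primary_only.attr_redis_endpoint_address / _port
single = redis.Redis(host=os.environ['REDIS_PRIMARY_HOST'], port=6379)

# cluster mode enabled: use redis_cluster.attr_configuration_end_point_address / _port
clustered = RedisCluster(host=os.environ['REDIS_CONFIG_ENDPOINT'], port=6379)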
Code example #18
0
  def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "VPC",
      # is_default=True, #XXX: Whether to match the default VPC
      vpc_name=vpc_name)

    # s3_bucket_name = self.node.try_get_context('s3_bucket_name')
    # s3_bucket = s3.Bucket.from_bucket_name(self, id, s3_bucket_name)
    s3_bucket_name_suffix = self.node.try_get_context('s3_bucket_name_suffix')
    s3_bucket = s3.Bucket(self, 'TransRecentAnncmtBucket',
      # removal_policy=cdk.RemovalPolicy.DESTROY,
      bucket_name='aws-rss-feed-{region}-{suffix}'.format(region=cdk.Aws.REGION,
        suffix=s3_bucket_name_suffix))

    s3_bucket.add_lifecycle_rule(prefix='whats-new-html/', id='whats-new-html',
      abort_incomplete_multipart_upload_after=cdk.Duration.days(3),
      expiration=cdk.Duration.days(7))

    sg_use_elasticache = aws_ec2.SecurityGroup(self, 'RssFeedTransBotCacheClientSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for redis client used rss feed trans bot',
      security_group_name='use-rss-feed-trans-bot-redis'
    )
    cdk.Tags.of(sg_use_elasticache).add('Name', 'use-rss-feed-trans-bot-redis')

    sg_elasticache = aws_ec2.SecurityGroup(self, 'RssFeedTransBotCacheSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for redis used rss feed trans bot',
      security_group_name='rss-feed-trans-bot-redis'
    )
    cdk.Tags.of(sg_elasticache).add('Name', 'rss-feed-trans-bot-redis')

    sg_elasticache.add_ingress_rule(peer=sg_use_elasticache, connection=aws_ec2.Port.tcp(6379), description='use-rss-feed-trans-bot-redis')

    elasticache_subnet_group = aws_elasticache.CfnSubnetGroup(self, 'RssFeedTransBotCacheSubnetGroup',
      description='subnet group for rss-feed-trans-bot-redis',
      subnet_ids=vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids,
      cache_subnet_group_name='rss-feed-trans-bot-redis'
    )

    translated_feed_cache = aws_elasticache.CfnCacheCluster(self, 'RssFeedTransBotCache',
      cache_node_type='cache.t3.small',
      num_cache_nodes=1,
      engine='redis',
      engine_version='5.0.5',
      auto_minor_version_upgrade=False,
      cluster_name='rss-feed-trans-bot-redis',
      snapshot_retention_limit=3,
      snapshot_window='17:00-19:00',
      preferred_maintenance_window='mon:19:00-mon:20:30',
      #XXX: Do not use reference for 'cache_subnet_group_name' - https://github.com/aws/aws-cdk/issues/3098
      cache_subnet_group_name=elasticache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
      vpc_security_group_ids=[sg_elasticache.security_group_id]
    )

    #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
    translated_feed_cache.add_depends_on(elasticache_subnet_group)

    cluster = aws_ecs.Cluster(self, "ECSCluster",
      cluster_name="rssfeed-trans-bot",
      vpc=vpc
    )

    task_role_policy_doc = aws_iam.PolicyDocument()
    task_role_policy_doc.add_statements(aws_iam.PolicyStatement(**{
      "effect": aws_iam.Effect.ALLOW,
      "resources": [s3_bucket.bucket_arn, "{}/*".format(s3_bucket.bucket_arn)],
      "actions": ["s3:AbortMultipartUpload",
        "s3:GetBucketLocation",
        "s3:GetObject",
        "s3:ListBucket",
        "s3:ListBucketMultipartUploads",
        "s3:PutObject"]
    }))

    task_execution_role = aws_iam.Role(self, 'ecsScheduledTaskRole',
      role_name='ecsRssFeedTransTaskExecutionRole',
      assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
      inline_policies = {
        "s3access": task_role_policy_doc
      },
      managed_policies=[
        aws_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
        aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSESFullAccess")
      ]
    )

    #XXX: ECS Fargate Task Scheduling using existing Security Group #5213
    # https://github.com/aws/aws-cdk/issues/5213
    # https://stackoverflow.com/questions/59067514/aws-cdk-ecs-task-scheduling-specify-existing-securitygroup
    task = aws_ecs.FargateTaskDefinition(self, 'ECSTaskDef',
      cpu=512,
      memory_limit_mib=1024,
      task_role=task_execution_role
    )

    repository_name = self.node.try_get_context('container_repository_name')
    repository_arn = aws_ecr.Repository.arn_for_local_repository(repository_name,
      self, cdk.Aws.ACCOUNT_ID)

    # repository = aws_ecr.Repository.from_repository_arn(self, "Repository",
    #   repository_arn=repository_arn)
    #
    # jsii.errors.JSIIError: "repositoryArn" is a late-bound value,
    # and therefore "repositoryName" is required. Use `fromRepositoryAttributes` instead
    repository = aws_ecr.Repository.from_repository_attributes(self, "ContainerRepository",
      repository_arn=repository_arn,
      repository_name=repository_name)

    container_image_tag = self.node.try_get_context('container_image_tag')
    container_image_tag = 'latest' if not container_image_tag else container_image_tag

    DRY_RUN = self.node.try_get_context('dry_run')
    DRY_RUN = 'false' if not DRY_RUN else DRY_RUN

    TRANSLATE_ALL_FEEDS = self.node.try_get_context('translate_all_feeds')
    TRANSLATE_ALL_FEEDS = 'false' if not TRANSLATE_ALL_FEEDS else TRANSLATE_ALL_FEEDS

    TRANS_DEST_LANG = self.node.try_get_context('trans_dest_lang')
    TRANS_DEST_LANG = 'false' if not TRANS_DEST_LANG else TRANS_DEST_LANG

    EMAIL_FROM_ADDRESS = self.node.try_get_context('email_from_address')
    EMAIL_TO_ADDRESSES = self.node.try_get_context('email_to_addresses')
    task.add_container('transbot',
      image=aws_ecs.ContainerImage.from_ecr_repository(repository, tag=container_image_tag),
      environment={
        "ELASTICACHE_HOST": translated_feed_cache.attr_redis_endpoint_address,
        "DRY_RUN": DRY_RUN,
        "TRANS_DEST_LANG": TRANS_DEST_LANG,
        "TRANSLATE_ALL_FEEDS": TRANSLATE_ALL_FEEDS,
        "EMAIL_FROM_ADDRESS": EMAIL_FROM_ADDRESS,
        "EMAIL_TO_ADDRESSES": EMAIL_TO_ADDRESSES,
        "REGION_NAME": cdk.Aws.REGION
      },
      logging=aws_ecs.LogDriver.aws_logs(stream_prefix="ecs",
        log_group=aws_logs.LogGroup(self, 
          "ECSContainerLogGroup",
          log_group_name="/ecs/rss-feed-trans-bot",
          retention=aws_logs.RetentionDays.ONE_DAY,
          removal_policy=cdk.RemovalPolicy.DESTROY)
      )
    )

    # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    event_schedule = dict(zip(['minute', 'hour', 'month', 'week_day', 'year'],
      self.node.try_get_context('event_schedule').split(' ')))

    scheduled_event_rule = aws_events.Rule(self, 'RssFeedScheduledRule',
      enabled=True,
      schedule=aws_events.Schedule.cron(**event_schedule),
      description="Translate AWS What's New")

    ecs_events_role = aws_iam.Role(self, 'ecsEventsRole',
      role_name='ecsRssFeedTransEventsRole',
      assumed_by=aws_iam.ServicePrincipal('events.amazonaws.com'),
      managed_policies=[
        aws_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceEventsRole")
      ]
    )

    scheduled_event_rule.add_target(aws_events_targets.EcsTask(cluster=cluster,
      task_definition=task,
      role=ecs_events_role,
      security_groups=[sg_use_elasticache],
      subnet_selection=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT)))
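The schedule above is read from the CDK context as a single space-separated string and zipped onto keyword arguments of aws_events.Schedule.cron(); note that the mapping omits the day-of-month field, which Schedule.cron() then fills in itself. A worked example with an assumed context value:

event_schedule = dict(zip(['minute', 'hour', 'month', 'week_day', 'year'],
                          '0 3 * MON *'.split(' ')))
# -> {'minute': '0', 'hour': '3', 'month': '*', 'week_day': 'MON', 'year': '*'}
# aws_events.Schedule.cron(**event_schedule) then renders as cron(0 3 ? * MON *)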
Code example #19
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        default_vpc = ec2.Vpc.from_lookup(self, 'default-vpc', is_default=True)
        cache_security_group = ec2.SecurityGroup(
            self,
            'devassoc-cache-sg',
            vpc=default_vpc,
            security_group_name='cache-sg-dev-demo',
            description='Elasticache Security Group for AWS Dev Study Guide')
        cache_security_group.add_ingress_rule(
            ec2.Peer.ipv4('99.116.136.249/32'), ec2.Port.tcp(22),
            'SSH from my IP')
        cache_security_group.add_ingress_rule(cache_security_group,
                                              ec2.Port.tcp(2049),
                                              'NFS for mount')

        ecache.CfnCacheCluster(
            self,
            'elasticache',
            engine='memcached',
            cluster_name='devassoc-memcache',
            num_cache_nodes=2,
            cache_node_type='cache.t2.micro',
            vpc_security_group_ids=[cache_security_group.security_group_id])

        efs_volume = efs.FileSystem(self,
                                    'efs-volume',
                                    vpc=default_vpc,
                                    security_group=cache_security_group,
                                    removal_policy=core.RemovalPolicy.DESTROY)

        ec2.Instance(
            self,
            'ec2-efs-instance',
            instance_name='efs-instance',
            instance_type=type.T2_MICRO,
            machine_image=ec2.MachineImage.generic_linux(ami_map=ami_map),
            vpc=default_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=cache_security_group,
            key_name='devassoc')

        bucket_name = 'devassoc-storage-versioned'
        bucket = s3.Bucket(self,
                           'bucket-versioned',
                           bucket_name=bucket_name,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True,
                           versioned=True)
        deploy = s3deploy.BucketDeployment(
            self,
            'DeployFiles',
            destination_bucket=bucket,
            sources=[
                s3deploy.Source.asset('./study_guide_exercises/polly_file')
            ],
            storage_class=s3deploy.StorageClass.ONEZONE_IA,
            cache_control=[s3deploy.CacheControl.set_public()])

        dynamodb_table_name = 'State'
        state_id = dynamodb.Attribute(name='Id',
                                      type=dynamodb.AttributeType.STRING)
        dynamo_db = dynamodb.Table(self,
                                   'dynamodb-stateless-app',
                                   table_name=dynamodb_table_name,
                                   partition_key=state_id,
                                   read_capacity=2,
                                   write_capacity=2,
                                   removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self, 'db-table-name', value=dynamo_db.table_name)
        core.CfnOutput(self, 'db-table-arn', value=dynamo_db.table_arn)

        global_table_name = 'Tables'
        table_id = dynamodb.Attribute(name='Id',
                                      type=dynamodb.AttributeType.STRING)
        table_group = dynamodb.Attribute(name='Group',
                                         type=dynamodb.AttributeType.STRING)
        dynamo_db_global = dynamodb.Table(
            self,
            'dynamodb-global',
            table_name=global_table_name,
            partition_key=table_id,
            sort_key=table_group,
            stream=dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            replication_regions=['us-west-2', 'eu-central-1'],
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       'global-table-name',
                       value=dynamo_db_global.table_name)
        core.CfnOutput(self,
                       'global-table-arn',
                       value=dynamo_db_global.table_arn)

        # TODO: create this in different region and set up replication
        replication_bucket_name = 'devassoc-storage-replica'
        bucket = s3.Bucket(self,
                           'bucket-replica',
                           bucket_name=replication_bucket_name,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True,
                           versioned=True)

        backup_plan = backup.BackupPlan.daily_weekly_monthly5_year_retention(
            self, 'backup-plan')
        backup_plan.add_selection(
            'backup-selection',
            resources=[backup.BackupResource.from_dynamo_db_table(dynamo_db)],
            backup_selection_name='StateBackup')
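The memcached cluster above is created without keeping a reference to the construct, and the security group never opens the default memcached port (11211), so nothing in the stack can read or reach the cache. A sketch of both adjustments; the variable name, the output ids, and the VPC-wide ingress rule are assumptions.

cache_security_group.add_ingress_rule(
    ec2.Peer.ipv4(default_vpc.vpc_cidr_block), ec2.Port.tcp(11211),
    'memcached from within the VPC')

memcached_cluster = ecache.CfnCacheCluster(
    self,
    'elasticache',
    engine='memcached',
    cluster_name='devassoc-memcache',
    num_cache_nodes=2,
    cache_node_type='cache.t2.micro',
    vpc_security_group_ids=[cache_security_group.security_group_id])

core.CfnOutput(self, 'memcached-endpoint',
               value=memcached_cluster.attr_configuration_endpoint_address)
core.CfnOutput(self, 'memcached-port',
               value=memcached_cluster.attr_configuration_endpoint_port)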
Code example #20
0
    def __init__(self, scope: core.Construct, id: str, context: InfraContext,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.security_group = ec2.SecurityGroup(
            self,
            'FriendlyNamedSvc-SG',
            vpc=context.networking.vpc,
            allow_all_outbound=True,
            description='Security group for FriendlyNamed service components')

        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='RedisInbound',
                                from_port=6379,
                                to_port=6379))

        self.subnet_group = ec.CfnSubnetGroup(
            self,
            'CacheSubnets',
            cache_subnet_group_name='FriendlyNamed-Subnets',
            description='Subnet groups for FriendlyNamed service',
            subnet_ids=[
                net.subnet_id
                for net in context.networking.vpc._select_subnet_objects(
                    subnet_group_name='FriendlyNamed')
            ])

        self.cluster = ec.CfnCacheCluster(
            self,
            'FriendlyNamedStore',
            cache_node_type="cache.t2.micro",
            engine='redis',
            cluster_name='friendly-named',
            num_cache_nodes=1,
            auto_minor_version_upgrade=True,
            cache_subnet_group_name=self.subnet_group.cache_subnet_group_name,
            vpc_security_group_ids=[self.security_group.security_group_id])

        self.python_lambda = PythonLambda(
            self,
            'Friendly-Named',
            build_prefix='artifacts/FinSurf-Friendly-Named',
            handler='handler.app',
            subnet_group_name='FriendlyNamed',
            context=context,
            securityGroups=[self.security_group])

        self.python_lambda.function.add_environment(
            key='REDIS_HOST', value=self.cluster.attr_redis_endpoint_address)
        self.python_lambda.function.add_environment(
            key='REDIS_PORT', value=self.cluster.attr_redis_endpoint_port)

        self.frontend_proxy = LambdaProxyConstruct(
            self,
            'FriendlyNamedAPI',
            handler=self.python_lambda.function,
            context=context)

        self.url = self.frontend_proxy.rest_api.url
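The subnet group above reaches into a private CDK API (_select_subnet_objects). A sketch of the same selection through the public select_subnets API, assuming the VPC actually defines a subnet group named 'FriendlyNamed':

subnet_group = ec.CfnSubnetGroup(
    self,
    'CacheSubnets',
    cache_subnet_group_name='FriendlyNamed-Subnets',
    description='Subnet groups for FriendlyNamed service',
    subnet_ids=context.networking.vpc.select_subnets(
        subnet_group_name='FriendlyNamed').subnet_ids)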
Code example #21
0
    def __init__(self, scope: core.Construct, id: str, env: core.Environment,
                 **kwargs) -> None:
        super().__init__(scope, id, env=env, **kwargs)

        # Create a /16 VPC with 1 public and private subnet
        # CIDR range will be divided evenly among subnets
        vpc = ec2.Vpc(self, 'bot-vpc', max_azs=2)

        # Create a Redis instance
        redis_security_group = ec2.SecurityGroup(self,
                                                 'redis-security-group',
                                                 vpc=vpc)
        redis_security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(6379),
            description='Allow connection from within VPC')

        redis_subnetgroup = ec.CfnSubnetGroup(
            self,
            'redis-subnetgroup',
            description="Group of private subnets from the VPC",
            subnet_ids=[ps.subnet_id for ps in vpc.private_subnets])

        redis_cluster = ec.CfnCacheCluster(
            self,
            'redis-cluster',
            cache_node_type='cache.t2.small',
            engine='redis',
            num_cache_nodes=1,
            port=6379,
            cache_subnet_group_name=redis_subnetgroup.ref,
            vpc_security_group_ids=[redis_security_group.security_group_id])
        redis_cluster.add_depends_on(redis_subnetgroup)

        #  Create a cluster
        cluster = ecs.Cluster(self, 'bot-cluster', vpc=vpc)

        # Create a Worker task definition
        worker_logging = ecs.AwsLogDriver(stream_prefix='worker')

        worker_task = ecs.FargateTaskDefinition(self,
                                                'worker-task',
                                                cpu=256,
                                                memory_limit_mib=512)
        worker_task.add_container(
            id='worker-container',
            image=ecs.ContainerImage.from_registry(
                'leonweecs/cosmos-worker:1.1'),
            environment={
                'REDIS_HOST': redis_cluster.attr_redis_endpoint_address,
                'REDIS_PORT': redis_cluster.attr_redis_endpoint_port
            },
            logging=worker_logging).add_port_mappings(
                ecs.PortMapping(container_port=80, host_port=80))

        # Create Worker Service
        worker_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'worker-service',
            cluster=cluster,
            assign_public_ip=False,
            cpu=256,
            memory_limit_mib=512,
            task_definition=worker_task,
            desired_count=1,
            protocol=elb.ApplicationProtocol.
            HTTP  # HTTPS requires valid domain name and SSL certificate
        )

        # Add a rule to allow ELB to talk with containers
        worker_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(80),
            description='Allow http inbound from VPC')

        # Configure ELB health check route
        worker_service.target_group.configure_health_check(
            path='/worker',
            healthy_http_codes='200-299',
        )

        # Setup AutoScaling policy
        scaling = worker_service.service.auto_scale_task_count(max_capacity=3)
        scaling.scale_on_cpu_utilization(
            'CpuScaling',
            target_utilization_percent=65,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

        # Create a MX task definition
        mx_logging = ecs.AwsLogDriver(stream_prefix='mx')

        mx_task = ecs.FargateTaskDefinition(self,
                                            'mx-task',
                                            cpu=256,
                                            memory_limit_mib=512)
        mx_task.add_container(
            id='mx-container',
            image=ecs.ContainerImage.from_registry('leonweecs/cosmos-mx:1.1'),
            environment={
                'REDIS_HOST':
                redis_cluster.attr_redis_endpoint_address,
                'REDIS_PORT':
                redis_cluster.attr_redis_endpoint_port,
                'WORKER_HOST':
                worker_service.load_balancer.load_balancer_dns_name,
                'WORKER_PORT':
                '80',
                'API_ID':
                ssm.StringParameter.value_for_string_parameter(self, "API_ID"),
                'API_HASH':
                ssm.StringParameter.value_for_string_parameter(
                    self, "API_HASH"),
                'BOT_TOKEN':
                ssm.StringParameter.value_for_string_parameter(
                    self, "BOT_TOKEN"),
            },
            logging=mx_logging)

        # Create a MX service
        mx_security_group = ec2.SecurityGroup(self,
                                              'mx-security-group',
                                              vpc=vpc)

        mx_service = ecs.FargateService(self,
                                        'mx-service',
                                        task_definition=mx_task,
                                        assign_public_ip=True,
                                        security_group=mx_security_group,
                                        cluster=cluster)

        core.CfnOutput(
            self,
            "LoadBalancerDNS",
            value=worker_service.load_balancer.load_balancer_dns_name)
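API_ID, API_HASH and BOT_TOKEN above are resolved from SSM at deploy time and stored as plain environment variables in the task definition. A sketch of passing one of them as an ECS secret instead, so the value is injected only at container start; the parameter name and construct ids are assumptions, and a SecureString parameter would need from_secure_string_parameter_attributes rather than from_string_parameter_name.

bot_token_param = ssm.StringParameter.from_string_parameter_name(
    self, 'BotTokenParam', string_parameter_name='BOT_TOKEN')

mx_task.add_container(
    id='mx-container',
    image=ecs.ContainerImage.from_registry('leonweecs/cosmos-mx:1.1'),
    secrets={
        # injected by ECS at container start instead of being baked into the task definition
        'BOT_TOKEN': ecs.Secret.from_ssm_parameter(bot_token_param),
    },
    logging=mx_logging)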
Code example #22
0
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Read BootStrap Script
        try:
            with open("bootstrap_scripts/install_httpd.sh", mode="r") as file:
                user_data = file.read()
        except OSError:
            print('Unable to read UserData script')

        # Get the latest AMI from AWS SSM
        linux_ami = _ec2.AmazonLinuxImage(
            generation=_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=_ec2.AmazonLinuxVirt.HVM,
            storage=_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Get the latest ami
        amzn_linux_ami = _ec2.MachineImage.latest_amazon_linux(
            generation=_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
        # ec2 Instance Role
        _instance_role = _iam.Role(
            self,
            "webAppClientRole",
            assumed_by=_iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore'),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonS3ReadOnlyAccess')
            ])
        # web_app_client Instance
        web_app_client = _ec2.Instance(
            self,
            "webAppClient",
            instance_type=_ec2.InstanceType(
                instance_type_identifier="t2.micro"),
            instance_name="web_app_Client",
            machine_image=amzn_linux_ami,
            vpc=vpc,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PUBLIC),
            role=_instance_role,
            user_data=_ec2.UserData.custom(user_data))
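        # The user-data script installs httpd on first boot, and the
        # AmazonSSMManagedInstanceCore policy lets us reach the instance through
        # SSM Session Manager instead of opening SSH.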

        # S3 Bucket
        app_data_bkt = _s3.Bucket(self,
                                  "appDataBkt",
                                  removal_policy=core.RemovalPolicy.DESTROY)

        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "ApplicationClient",
            value=web_app_client.instance_id,
            description=
            f"This instance can be used as app client for testing performance")
        output_2 = core.CfnOutput(
            self,
            "MonitoredS3Bucket",
            value=(
                #   f"https://console.aws.amazon.com/s3/buckets/"
                f"{app_data_bkt.bucket_name}"),
            description=f"S3 Bucket to host application data")

        # Security Group for redis
        redis_sg = _ec2.SecurityGroup(
            self,
            'redisSecurityGroup',
            vpc=vpc,
            security_group_name='RedisSG',
            description="Security Group for Redis Cache",
            allow_all_outbound=True)

        # Allows Cache Cluster to receive traffic from the VPC on port 6379
        redis_sg.add_ingress_rule(
            _ec2.Peer.ipv4(vpc.vpc_cidr_block),
            # _ec2.Peer.any_ipv4(),
            _ec2.Port.tcp(6379),
            description="Allow Clients to fetch data from Redis Cache Cluster")

        # Iterate the private subnets
        pvt_subnets = vpc.select_subnets(subnet_type=_ec2.SubnetType.PRIVATE)

        # Create the Redis Subnet Group
        redis_subnet_group = _elasticache.CfnSubnetGroup(
            self,
            'redis-sg',
            subnet_ids=pvt_subnets.subnet_ids,
            description='subnet group for redis')

        # There is no higher-level CDK construct for ElastiCache yet (as of Mar 2020),
        # so we fall back to the low-level CloudFormation (Cfn) construct:
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticache/CfnCacheCluster.html
        redis_cluster = _elasticache.CfnCacheCluster(
            self,
            'redisCluster',
            cache_node_type='cache.t3.micro',
            engine='redis',
            num_cache_nodes=1,
            port=6379,
            cluster_name='miztiik-cluster',
            cache_subnet_group_name=redis_subnet_group.ref,
            vpc_security_group_ids=[redis_sg.security_group_id],
            auto_minor_version_upgrade=True)
        redis_cluster.add_depends_on(redis_subnet_group)
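        # The explicit depends_on guarantees the subnet group is created before
        # the cache cluster (CloudFormation can usually infer this from the Ref,
        # so this is belt-and-braces).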

        output_3 = core.CfnOutput(
            self,
            'redisSg',
            value=redis_sg.security_group_id,
            export_name='redisSg',
            description='The ElastiCache Cluster Security Group Id')
        output_4 = core.CfnOutput(
            self,
            'redisClusterEndpoint',
            value=redis_cluster.attr_redis_endpoint_address,
            description='The endpoint of the ElastiCache Cluster')
        output_5 = core.CfnOutput(
            self,
            'redisClusterPort',
            value=redis_cluster.attr_redis_endpoint_port,
            description='The port of the ElastiCache Cluster')
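        # A quick connectivity check from the web_app_client instance might look
        # like this (endpoint taken from the redisClusterEndpoint output):
        #   redis-cli -h <cluster-endpoint> -p 6379 ping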

        # Load some dummy data into the Redis cluster & S3 using a Lambda-backed custom resource
        ingest_data_redis = redis_data_ingester(
            self,
            "ingestData",
            config_params={
                "REDIS_HOST": redis_cluster.attr_redis_endpoint_address,
                'REDIS_PORT': '6379',
                "BUCKET_NAME": app_data_bkt.bucket_name,
                'RECORD_COUNT': '200',
                'BUCKET': app_data_bkt,
                'VPC': vpc,
                'REDIS_SG': redis_sg
            },
            message=[{
                "REDIS_HOST": redis_cluster.attr_redis_endpoint_address,
                'REDIS_PORT': '6379',
                "BUCKET_NAME": app_data_bkt.bucket_name,
                'RECORD_COUNT': '200'
            }])
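        # redis_data_ingester is a helper construct defined elsewhere in this
        # project; judging by the VPC/security-group configuration it receives,
        # it is presumably a Lambda-backed custom resource that seeds the cache
        # and the S3 bucket with RECORD_COUNT dummy records at deploy time.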
        """
Code Example #23
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        dataset_metadata_filename: str,
        dataset_metadata_generator_function_name: str,
        memory: int = 1024,
        timeout: int = 30,
        concurrent: int = 100,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        # add cache
        if config.VPC_ID:
            vpc = ec2.Vpc.from_lookup(
                self,
                f"{id}-vpc",
                vpc_id=config.VPC_ID,
            )
        else:
            vpc = ec2.Vpc(self, f"{id}-vpc")

        sb_group = escache.CfnSubnetGroup(
            self,
            f"{id}-subnet-group",
            description=f"{id} subnet group",
            subnet_ids=[sb.subnet_id for sb in vpc.private_subnets],
        )

        lambda_function_security_group = ec2.SecurityGroup(self,
                                                           f"{id}-lambda-sg",
                                                           vpc=vpc)
        lambda_function_security_group.add_egress_rule(
            ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description="Allow lambda security group all outbound access",
        )

        cache_security_group = ec2.SecurityGroup(self,
                                                 f"{id}-cache-sg",
                                                 vpc=vpc)

        cache_security_group.add_ingress_rule(
            lambda_function_security_group,
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description=
            "Allow Lambda security group access to Cache security group",
        )
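        # Pairing the two security groups this way means only the Lambda
        # function can reach the cache; a tighter variant would limit the rule
        # to the cache port instead of all protocols and ports.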

        cache = escache.CfnCacheCluster(
            self,
            f"{id}-cache",
            cache_node_type=config.CACHE_NODE_TYPE,
            engine=config.CACHE_ENGINE,
            num_cache_nodes=config.CACHE_NODE_NUM,
            vpc_security_group_ids=[cache_security_group.security_group_id],
            cache_subnet_group_name=sb_group.ref,
        )

        logs_access = iam.PolicyStatement(
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
            ],
            resources=["*"],
        )
        ec2_network_access = iam.PolicyStatement(
            actions=[
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
            ],
            resources=["*"],
        )

        lambda_env = DEFAULT_ENV.copy()
        lambda_env.update(
            dict(
                MODULE_NAME="covid_api.main",
                VARIABLE_NAME="app",
                WORKERS_PER_CORE="1",
                LOG_LEVEL="error",
                MEMCACHE_HOST=cache.attr_configuration_endpoint_address,
                MEMCACHE_PORT=cache.attr_configuration_endpoint_port,
                DATASET_METADATA_FILENAME=dataset_metadata_filename,
                DATASET_METADATA_GENERATOR_FUNCTION_NAME=
                dataset_metadata_generator_function_name,
                PLANET_API_KEY=os.environ["PLANET_API_KEY"],
            ))
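        # The configuration endpoint attributes used for MEMCACHE_HOST/PORT are
        # only populated for memcached clusters, so config.CACHE_ENGINE is
        # expected to be "memcached" here; redis clusters expose
        # attr_redis_endpoint_address/port instead (as in the other examples).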

        lambda_function_props = dict(
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=self.create_package(code_dir),
            handler="handler.handler",
            memory_size=memory,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
            security_groups=[lambda_function_security_group],
            vpc=vpc,
        )

        if concurrent:
            lambda_function_props[
                "reserved_concurrent_executions"] = concurrent

        lambda_function = aws_lambda.Function(self, f"{id}-lambda",
                                              **lambda_function_props)

        lambda_function.add_to_role_policy(s3_full_access_to_data_bucket)
        lambda_function.add_to_role_policy(logs_access)
        lambda_function.add_to_role_policy(ec2_network_access)
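        # The ec2:*NetworkInterface permissions let the VPC-attached function
        # create and clean up its ENIs; recent CDK versions typically attach an
        # equivalent managed policy automatically when a vpc is configured, so
        # this is likely redundant but harmless.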

        # defines an API Gateway Http API resource backed by our "dynamoLambda" function.
        apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw_integrations.LambdaProxyIntegration(
                handler=lambda_function),
        )
Code Example #24
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.poc_config = {'api_poc': dict()}
        self.read_config()

        # shared stuff
        self._vpc = ec2.Vpc(
            self,
            'api_poc-vpc',
            cidr='10.0.0.0/23',
            max_azs=1,
            nat_gateways=1,
        )

        self._private_subnet_selection = self._vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE)
        self._security_group = ec2.SecurityGroup.from_security_group_id(
            self,
            'default_sg',
            security_group_id=self._vpc.vpc_default_security_group)

        self._security_group.add_ingress_rule(description='redis',
                                              peer=self._security_group,
                                              connection=ec2.Port.tcp(6379))
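        # Every resource in this stack is placed in the VPC's default security
        # group, so this single self-referencing rule is enough to let the
        # Lambda functions reach Redis on 6379.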

        self._python3_lib_layer = _lambda.LayerVersion(
            self,
            'python3-lib-layer',
            description="python3 module dependencies",
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_6
            ],
            code=_lambda.Code.from_asset('layers/python3-lib-layer.zip'))

        # redis cache cluster
        self._cache_subnet_group = elasticache.CfnSubnetGroup(
            self,
            'cache_subnet_group',
            description='elasticache subnet group',
            subnet_ids=self._private_subnet_selection.subnet_ids,
            cache_subnet_group_name='cache-subnet-group')

        self._redis_cache = elasticache.CfnCacheCluster(
            self,
            'cache',
            cache_node_type='cache.t2.micro',
            num_cache_nodes=1,
            engine='redis',
            cache_subnet_group_name=self._cache_subnet_group.cache_subnet_group_name,
            vpc_security_group_ids=[self._security_group.security_group_id],
        )
        self._redis_cache.add_depends_on(self._cache_subnet_group)
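        # add_depends_on is needed because the cluster references the subnet
        # group by name (a plain string) rather than by Ref, so CloudFormation
        # cannot infer the creation order on its own.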

        # external API simulator lambda
        api_handler = _lambda.Function(
            self,
            "external-api",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='external_api.handler',
            layers=[self._python3_lib_layer],
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE)
        api_handler.add_environment('REDIS_ADDRESS', self.redis_address)
        api_handler.add_environment('REDIS_PORT', self.redis_port)

        # API Gateway frontend to simulator lambda
        self._api_gateway = apigw.LambdaRestApi(
            self,
            'external_api',
            description='external API emulator',
            options=apigw.StageOptions(stage_name='dev'),
            handler=api_handler,
            proxy=True)

        job_dlq = sqs.Queue(self, 'job-dlq')

        job_queue = sqs.Queue(self,
                              'job-queue',
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  queue=job_dlq, max_receive_count=3))
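        # Messages that fail processing three times are moved to job-dlq instead
        # of being retried forever.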

        throttle_event_topic = sns.Topic(self, 'throttle-events-topic')

        self.add_sns_subscriptions(throttle_event_topic)

        worker = _lambda.Function(self,
                                  'worker',
                                  runtime=_lambda.Runtime.PYTHON_3_7,
                                  code=_lambda.Code.from_asset('lambda'),
                                  handler='worker.handler',
                                  layers=[self._python3_lib_layer],
                                  reserved_concurrent_executions=20,
                                  timeout=core.Duration.minutes(1),
                                  vpc=self._vpc,
                                  vpc_subnets=self._private_subnet_selection,
                                  security_group=self._security_group,
                                  log_retention=logs.RetentionDays.FIVE_DAYS,
                                  tracing=_lambda.Tracing.ACTIVE,
                                  dead_letter_queue_enabled=False)
        worker.add_environment('API_KEY', '212221848ab214821de993a9d')
        worker.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        worker.add_environment('THROTTLE_EVENTS_TOPIC',
                               throttle_event_topic.topic_arn)
        worker.add_environment('REDIS_ADDRESS', self.redis_address)
        worker.add_environment('REDIS_PORT', self.redis_port)
        job_queue.grant_send_messages(worker)
        throttle_event_topic.grant_publish(worker)

        orchestrator = _lambda.Function(
            self,
            'orchestrator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='orchestrator.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            timeout=core.Duration.minutes(2),
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        orchestrator.add_environment('API_HOST_URL', self._api_gateway.url)
        orchestrator.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        orchestrator.add_environment('JOB_DLQ_URL', job_dlq.queue_url)
        orchestrator.add_environment('THROTTLE_EVENTS_TOPIC',
                                     throttle_event_topic.topic_arn)
        orchestrator.add_environment('REDIS_ADDRESS', self.redis_address)
        orchestrator.add_environment('REDIS_PORT', self.redis_port)
        orchestrator.add_environment('WORKER_FUNCTION_ARN',
                                     worker.function_arn)
        job_queue.grant_consume_messages(orchestrator)
        job_dlq.grant_send_messages(orchestrator)
        throttle_event_topic.grant_publish(orchestrator)
        worker.grant_invoke(orchestrator)

        task_master = _lambda.Function(
            self,
            'task_master',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='task_master.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        task_master.add_environment('SQS_URL', job_queue.queue_url)
        task_master.add_environment('REDIS_ADDRESS', self.redis_address)
        task_master.add_environment('REDIS_PORT', self.redis_port)
        task_master.add_environment('API_HOST_URL', self._api_gateway.url)
        job_queue.grant_send_messages(task_master)

        slack_notify = _lambda.Function(
            self,
            'slack-notify',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='slack_notify.lambda_handler',
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        # lambda uses ssm parameter store to retrieve values
        slack_notify.add_environment('encryptedHookUrlKey',
                                     '/api_poc/notify/slack/hook_url')
        slack_notify.add_environment('slackChannelKey',
                                     '/api_poc/notify/slack/channel')
        slack_notify.add_environment('notifySlack', 'false')
        slack_notify.add_event_source(
            event_sources.SnsEventSource(throttle_event_topic))
        slack_notify.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # TODO fix least privilege
                # actions=['ssm:GetParameter'],
                # resources=['arn:aws:ssm:::parameter/api_poc/notify/slack/*'],
                actions=['ssm:*'],
                resources=['*'],
            ))

        # kick off lambda(s) once per interval
        rule = events.Rule(self,
                           'orchestrator_rule',
                           schedule=events.Schedule.rate(
                               core.Duration.hours(1)))
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule.add_target(targets.LambdaFunction(orchestrator))
        rule.add_target(targets.LambdaFunction(task_master))
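        # The same hourly rule triggers both the orchestrator and the
        # task_master; each invocation receives the scheduled event as its
        # payload.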

        # stack outputs
        core.CfnOutput(self,
                       'Redis_Address',
                       value=self._redis_cache.attr_redis_endpoint_address +
                       ':' + self._redis_cache.attr_redis_endpoint_port)