    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        self.vpc = ec2.Vpc(self, "VPC",
            cidr='10.10.0.0/16',
            max_azs=6,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PRIVATE,
                    name='Private',
                    cidr_mask=19
                ),
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PUBLIC,
                    name='Public',
                    cidr_mask=20
                ),
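                # reserved=True allocates the address space for this group but does not create the subnets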
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PRIVATE,
                    name='Spare',
                    cidr_mask=20,
                    reserved=True
                )
            ])
        core.CfnOutput(self, "Output", value=self.vpc.vpc_id)

        self.bastion = ec2.BastionHostLinux(self, id,
            vpc = self.vpc,
            instance_name = 'bastione',
            instance_type = ec2.InstanceType('t3.micro'),
            machine_image = ec2.AmazonLinuxImage(),
            subnet_selection = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        )
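        # no SSH key pair is attached; BastionHostLinux is designed to be reached via SSM Session Manager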
        core.CfnOutput(self, 'bastion-id', value=self.bastion.instance_id)
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        #XXX: To use more than 2 AZs, be sure to specify the account and region on your stack.
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
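        #     e.g. (assumed app entry point, names are placeholders):
        #       VpcStack(app, "vpc", env=core.Environment(
        #           account=os.environ["CDK_DEFAULT_ACCOUNT"],
        #           region=os.environ["CDK_DEFAULT_REGION"]))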
        vpc = aws_ec2.Vpc(self, 'VpcStack', max_azs=6)

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(
            self,
            "EC2InstanceVPC",
            is_default=True,  # set is_default=False if you want to find your own VPC
            vpc_name=vpc_name)

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
Example #4
    def __init__(self, scope: core.Construct, construct_id: str, vpc, ec2_capacity=False, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.cluster = ecs.Cluster(self, "CdkCluster",
                                   cluster_name="cdk-ecs",
                                   vpc=vpc)

        if ec2_capacity:
            key_name = os.getenv("EC2_KEY_PAIR_NAME")
            asg = self.cluster.add_capacity("CdkClusterCapacity",
                                            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3_AMD,
                                                                              ec2.InstanceSize.SMALL),
                                            key_name=key_name,
                                            cooldown=cdk_core.Duration.seconds(30),
                                            min_capacity=1,
                                            max_capacity=5)

            # note: CDK doesn't support ECS capacity providers yet
            asg.scale_to_track_metric("CpuReservationScalingPolicy",
                                      metric=self.cluster.metric_cpu_reservation(),
                                      target_value=50)
            asg.scale_to_track_metric("MemoryReservationScalingPolicy",
                                      metric=self.cluster.metric_memory_reservation(),
                                      target_value=50)

            bastion = ec2.BastionHostLinux(self, "CdkBastion",
                                           vpc=vpc,
                                           subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
            bastion.allow_ssh_access_from(ec2.Peer.any_ipv4())
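            # reach through to the underlying CfnInstance to attach the EC2 key pair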
            bastion.instance.instance.key_name = key_name

            asg.connections.allow_from(bastion, ec2.Port.tcp(22))
Example #5
    def build(cls, *, stack):

        vpc_db_instance = cls()

        vpc_db_instance.vpc = ec2.Vpc(stack, "vpc",
            cidr="10.0.0.0/24"
        )

        master_user_name = "exampleadmin"

        vpc_db_instance.db_security_group = ec2.SecurityGroup(stack, "dbsecuritygroup", 
            security_group_name="DBSG",
            vpc=vpc_db_instance.vpc,
            allow_all_outbound=True
        )

        vpc_db_instance.db_instance = rds.DatabaseInstance(stack,
            'exampleinstance', 
            master_username=master_user_name,
            engine=rds.DatabaseInstanceEngine.POSTGRES, 
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), 
            vpc=vpc_db_instance.vpc,
            auto_minor_version_upgrade=True,
            database_name='exampledb',
            storage_encrypted=True,
            multi_az=False,
            security_groups=[vpc_db_instance.db_security_group]
        )

        # create bastion host in public subnet with no KeyPairName (use ec2-instance-connect)

        vpc_db_instance.bastion_host_security_group = ec2.SecurityGroup(stack, "bastionhostsecuritygroup", 
            security_group_name="bastionhostsecuritygroup",
            vpc=vpc_db_instance.vpc,
            allow_all_outbound=True
        )

        vpc_db_instance.bastion_host_linux = ec2.BastionHostLinux(stack, "bastionhostSG",
            vpc=vpc_db_instance.vpc,
            instance_name="bastionhost",
            instance_type=ec2.InstanceType("t2.micro"),
            subnet_selection={
                "subnet_type": ec2.SubnetType.PUBLIC
            },
            security_group=vpc_db_instance.bastion_host_security_group
        )

        vpc_db_instance.db_security_group.add_ingress_rule(vpc_db_instance.bastion_host_security_group, ec2.Port.tcp(5432), 'bastion host')

        return vpc_db_instance
Example #6
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #Creating Bastion
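        # key_name, ec2_type, linux_ami and user_data are assumed to be defined elsewhere (e.g. module-level config)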
        bastion = ec2.BastionHostLinux(self,
                                       'webapp-bastion',
                                       vpc=vpc,
                                       subnet_selection=ec2.SubnetSelection(
                                           subnet_type=ec2.SubnetType.PUBLIC))
        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        #Creating AutoScaling group
        self.asg = autoscaling.AutoScalingGroup(
            self,
            "webapp-autoscaling",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=2,
        )

        #Creating Load Balancer
        alb = elb.ApplicationLoadBalancer(self,
                                          "webapp-alb",
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name="webapp-alb")
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)

        #Final Configuration
        self.asg.connections.allow_from(
            alb, ec2.Port.tcp(80),
            "ALB access 80 port of EC2 in Autoscaling Group")
        listener.add_targets("addTargetGroup", port=80, targets=[self.asg])

        core.CfnOutput(self, "Output", value=alb.load_balancer_dns_name)
Example #7
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        vpc = ec2.Vpc(
            self,
            "VPC",
            cidr="10.0.0.0/21",
            max_azs=3,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Ingress",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PRIVATE,
                    name="Application",
                    cidr_mask=24,
                ),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="Database",
                                        cidr_mask=28,
                                        reserved=True)
            ])

        host = ec2.BastionHostLinux(
            self,
            "cdk-vpc-BastionHost",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            # block_devices=[ec2.BlockDevice(
            #     device_name="EBSBastionHost_CDK",
            #     volume=ec2.BlockDeviceVolume.ebs(30,
            #                                      encrypted=True,
            #                                      delete_on_termination=True
            #                                      )
            # )]
        )

        core.CfnOutput(self,
                       "Bastion Host Public IP",
                       value=host.instance_public_ip)
        self.vpc = vpc
Example #8
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )

        i = 1
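        # create one bastion host per subnet in the "BASTION" subnet group (one per AZ)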
        for subnet in vpc.stack.vpc.select_subnets(subnet_group_name="BASTION").subnets:
            bastion_host = ec2.BastionHostLinux(self,
                                                f"ec2-BASTION-Instance{i}",
                                                vpc=vpc,
                                                subnet_selection=ec2.SubnetSelection(
                                                    availability_zones=[subnet.availability_zone],
                                                    subnet_group_name="BASTION"
                                                ),
                                                instance_type=ec2.InstanceType("t1.micro"),
                                                machine_image=amzn_linux
                                                )
            bastion_host.allow_ssh_access_from(ec2.Peer.any_ipv4())
            i += 1

        host_admin_group = iam.Group(self,
                                     "HostAdmins",
                                     )

        policy = iam.Policy(self,
                            "HostAdminPolicy",
                            groups=[host_admin_group]
                            )

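        # allow group members to push temporary SSH public keys to instances via EC2 Instance Connect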
        policy.add_statements(iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                                  actions=["ec2-instance-connect:SendSSHPublicKey"],
                                                  resources=
                                                  [f"arn:aws:ec2:{self.region}:{self.account}:instance/*"],
                                                  conditions={"StringEquals": {"ec2:osuser": "******"}}
                                                  ))
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        private_subnet1 = ec2.SubnetConfiguration(
            cidr_mask=24,
            name='PrivateSubnet1',
            subnet_type=ec2.SubnetType.PRIVATE)

        private_subnet2 = ec2.SubnetConfiguration(
            cidr_mask=24,
            name='PrivateSubnet2',
            subnet_type=ec2.SubnetType.PRIVATE)

        public_subnet1 = ec2.SubnetConfiguration(
            cidr_mask=25,
            name='PublicSubnet1',
            subnet_type=ec2.SubnetType.PUBLIC)

        public_subnet2 = ec2.SubnetConfiguration(
            cidr_mask=25,
            name='PublicSubnet2',
            subnet_type=ec2.SubnetType.PUBLIC)

        vpc = ec2.Vpc(self,
                      'TestVPC',
                      cidr='10.100.0.0/16',
                      subnet_configuration=[
                          private_subnet1, public_subnet1, private_subnet2,
                          public_subnet2
                      ])

        bastion_ssh_sg = ec2.SecurityGroup(self,
                                           'SSHBastion_SecurityGroup',
                                           vpc=vpc,
                                           security_group_name='Bastion_SSH')

        bastion_host = ec2.BastionHostLinux(self, 'Bastion-1', vpc=vpc,
                                            security_group=bastion_ssh_sg)
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = aws_ec2.Vpc(
            self,
            "AnalyticsWorkshopVPC",
            max_azs=2,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        sg_bastion_host = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for a bastion host',
            security_group_name='bastion-host-sg')
        core.Tag.add(sg_bastion_host, 'Name', 'bastion-host-sg')

        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceClass.html
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceSize.html#aws_cdk.aws_ec2.InstanceSize
        ec2_instance_type = aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM)

        #XXX: As there are no SSH public keys deployed on this machine,
        # you need to use EC2 Instance Connect with the command
        #  'aws ec2-instance-connect send-ssh-public-key' to provide your SSH public key.
        # https://aws.amazon.com/de/blogs/compute/new-using-amazon-ec2-instance-connect-for-ssh-access-to-your-ec2-instances/
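        #     e.g. (placeholder values):
        #       aws ec2-instance-connect send-ssh-public-key --instance-id <bastion-instance-id> \
        #         --availability-zone <az> --instance-os-user ec2-user \
        #         --ssh-public-key file://my_key.pub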
        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=ec2_instance_type,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=sg_bastion_host)

        #TODO: SHOULD restrict the IP range allowed SSH access
        bastion_host.allow_ssh_access_from(aws_ec2.Peer.ipv4("0.0.0.0/0"))

        #XXX: In order to test data pipeline, add {Kinesis, KinesisFirehose}FullAccess Policy to the bastion host.
        bastion_host.role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["kinesis:*"]))
        bastion_host.role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["firehose:*"]))

        sg_use_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch client',
            security_group_name='use-es-cluster-sg')
        core.Tag.add(sg_use_es, 'Name', 'use-es-cluster-sg')

        sg_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch cluster',
            security_group_name='es-cluster-sg')
        core.Tag.add(sg_es, 'Name', 'es-cluster-sg')

        sg_es.add_ingress_rule(peer=sg_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_use_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='use-es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_bastion_host,
                               connection=aws_ec2.Port.all_tcp(),
                               description='bastion-host-sg')

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="aws-analytics-immersion-day-{region}-{account}".
            format(region=kwargs['env'].region, account=kwargs['env'].account))

        trans_kinesis_stream = kinesis.Stream(
            self,
            "AnalyticsWorkshopKinesisStreams",
            stream_name='retail-trans')

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=[trans_kinesis_stream.stream_arn],
                actions=[
                    "kinesis:DescribeStream", "kinesis:GetShardIterator",
                    "kinesis:GetRecords"
                ]))

        firehose_log_group_name = "/aws/kinesisfirehose/retail-trans"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        trans_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "KinesisFirehoseToS3",
            delivery_stream_name="retail-trans",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": trans_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat":
                "UNCOMPRESSED",  # [GZIP | HADOOP_SNAPPY | Snappy | UNCOMPRESSED | ZIP]
                "prefix":
                "json-data/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
                "errorOutputPrefix":
                "error-json/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}",
                "roleArn": firehose_role.role_arn
            })

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_domain_name = 'retail'
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            "ElasticSearch",
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name=es_domain_name,
            elasticsearch_version="7.4",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(
                        service="es",
                        resource="domain",
                        resource_name="{}/*".format(es_domain_name))
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tag.add(es_cfn_domain, 'Name', 'analytics-workshop-es')

        #XXX: https://github.com/aws/aws-cdk/issues/1342
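        # S3_BUCKET_LAMBDA_LAYER_LIB is assumed to be a module-level constant naming the bucket that holds the layer archive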
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   S3_BUCKET_LAMBDA_LAYER_LIB)
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket, "var/es-lib.zip"))

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertToES",
            handler="upsert_to_es.lambda_handler",
            description="Upsert records into elasticsearch",
            code=_lambda.Code.asset("./src/main/python/UpsertToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                #TODO: MUST set appropriate environment variables for your workloads.
                'ES_INDEX': 'retail',
                'ES_TYPE': 'trans',
                'REQUIRED_FIELDS': 'Invoice,StockCode,Customer_ID',
                'REGION_NAME': kwargs['env'].region,
                'DATE_TYPE_FIELDS': 'InvoiceDate'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_es],
            vpc=vpc)

        trans_kinesis_event_source = KinesisEventSource(
            trans_kinesis_stream,
            batch_size=1000,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(trans_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertToESLogGroup",
            log_group_name="/aws/lambda/UpsertToES",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)

        merge_small_files_lambda_fn = _lambda.Function(
            self,
            "MergeSmallFiles",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="MergeSmallFiles",
            handler="athena_ctas.lambda_handler",
            description="Merge small files in S3",
            code=_lambda.Code.asset("./src/main/python/MergeSmallFiles"),
            environment={
                #TODO: MUST set appropriate environment variables for your workloads.
                'OLD_DATABASE':
                'mydatabase',
                'OLD_TABLE_NAME':
                'retail_trans_json',
                'NEW_DATABASE':
                'mydatabase',
                'NEW_TABLE_NAME':
                'ctas_retail_trans_parquet',
                'WORK_GROUP':
                'primary',
                'OUTPUT_PREFIX':
                's3://{}'.format(
                    os.path.join(s3_bucket.bucket_name,
                                 'parquet-retail-trans')),
                'STAGING_OUTPUT_PREFIX':
                's3://{}'.format(os.path.join(s3_bucket.bucket_name, 'tmp')),
                'COLUMN_NAMES':
                'invoice,stockcode,description,quantity,invoicedate,price,customer_id,country',
            },
            timeout=core.Duration.minutes(5))

        merge_small_files_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["athena:*"]))
        merge_small_files_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "s3:Get*",
                                        "s3:List*",
                                        "s3:AbortMultipartUpload",
                                        "s3:PutObject",
                                    ]))
        merge_small_files_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=["*"],
                actions=[
                    "glue:CreateDatabase", "glue:DeleteDatabase",
                    "glue:GetDatabase", "glue:GetDatabases",
                    "glue:UpdateDatabase", "glue:CreateTable",
                    "glue:DeleteTable", "glue:BatchDeleteTable",
                    "glue:UpdateTable", "glue:GetTable", "glue:GetTables",
                    "glue:BatchCreatePartition", "glue:CreatePartition",
                    "glue:DeletePartition", "glue:BatchDeletePartition",
                    "glue:UpdatePartition", "glue:GetPartition",
                    "glue:GetPartitions", "glue:BatchGetPartition"
                ]))
        merge_small_files_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["lakeformation:GetDataAccess"]))

        lambda_fn_target = aws_events_targets.LambdaFunction(
            merge_small_files_lambda_fn)
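        # Schedule.cron(minute="5") fires at 5 minutes past every hour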
        aws_events.Rule(self,
                        "ScheduleRule",
                        schedule=aws_events.Schedule.cron(minute="5"),
                        targets=[lambda_fn_target])

        log_group = aws_logs.LogGroup(
            self,
            "MergeSmallFilesLogGroup",
            log_group_name="/aws/lambda/MergeSmallFiles",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(merge_small_files_lambda_fn)
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #############################################
        ### Setup a basic vpc and security groups ###
        #############################################

        # Create a simple vpc
        vpc = ec2.Vpc(self, "MyVPC", max_azs=3)

        # My existing ssh key pair name
        keypair = 'tom'

        # Dynamically look up the Ubuntu AMI ID - requires the CDK_DEFAULT_REGION environment variable (stack env) to be set
        dynamic_ubuntu_ami = ec2.MachineImage.lookup(
            name="*ubuntu-bionic-18.04-amd64-server*", owners=["099720109477"])

        # Security group for our test instance
        my_sg = ec2.SecurityGroup(self,
                                  "my_sg",
                                  vpc=vpc,
                                  description="My sg for testing",
                                  allow_all_outbound=True)
        # Add ssh from anywhere
        my_sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                               "Allow ssh access from anywhere")

        #################################################################
        # Single Ubuntu EC2 instance in Private Subnet in an ASG of 1:1 #
        #################################################################
        asg = autoscaling.AutoScalingGroup(
            self,
            "Ubuntu-ASG-Instance",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=dynamic_ubuntu_ami,
            key_name=keypair,
        )
        asg.add_security_group(my_sg)  # add our security group, expects object

        ########################################################
        # Single Ubuntu EC2 instance in Private Subnet, no ASG #
        ########################################################
        # CfnInstance requires image_id to be the AMI ID string
        ubuntu_ami = dynamic_ubuntu_ami.get_image(self).image_id
        instance = ec2.CfnInstance(
            self,
            "Ubuntu-Instance",
            image_id=ubuntu_ami,
            instance_type='m4.large',
            monitoring=True,
            key_name=keypair,
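            # a plain dict can stand in for ec2.CfnInstance.NetworkInterfaceProperty here (note the camelCase keys)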
            network_interfaces=[{
                "deviceIndex": "0",
                "associatePublicIpAddress": False,
                "subnetId": vpc.private_subnets[0].subnet_id,
                "groupSet": [my_sg.security_group_id]
            }],
        )

        ###################################################
        # Bastion host to access Ubuntu hosts for testing #
        ###################################################
        host = ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
        )
        host.allow_ssh_access_from(
            ec2.Peer.ipv4("0.0.0.0/0"))  # Restrict this to your IP
        host.instance.instance.add_property_override(
            "KeyName", keypair)  # Add keypair for access unless you use SSM
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.current_dir = os.path.dirname(__file__)

        self.vpc = ec2.Vpc(
            self,
            "VPC",
            cidr="10.0.0.0/21",
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name="Database",
                    subnet_type=ec2.SubnetType.ISOLATED,
                ),
                ec2.SubnetConfiguration(cidr_mask=28,
                                        name="Private",
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(cidr_mask=28,
                                        name="Public",
                                        subnet_type=ec2.SubnetType.PUBLIC)
            ],
            nat_gateways=3)

        self.qs_security_group = ec2.SecurityGroup(
            self,
            "quicksight-sg",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="QuickSight security group")

        self.bastion = ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=self.vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))

        self.bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                     "Internet access SSH")

        self.vpc.add_interface_endpoint(
            "redshift_endpoint",
            service=ec2.InterfaceVpcEndpointAwsService("redshift"))

        self.vpc.add_interface_endpoint(
            "rds_endpoint", service=ec2.InterfaceVpcEndpointAwsService("rds"))

        self.redshift_secret = secrets.Secret(
            self,
            'redshift-admin',
            secret_name='redshift-admin',
            description="This secret holds the generated admin password for the Redshift cluster",
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key='password',
                password_length=32,
                exclude_characters='"@\\\/',
                exclude_punctuation=True))

        self.rs_security_group = ec2.SecurityGroup(self,
                                                   "redshift-sg",
                                                   vpc=self.vpc,
                                                   allow_all_outbound=True,
                                                   description="Redshift SG")

        self.rs_security_group.add_ingress_rule(self.rs_security_group,
                                                ec2.Port.all_tcp(),
                                                'Redshift-basic')

        self.rs_security_group.add_ingress_rule(
            # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
            ec2.Peer.ipv4('52.23.63.224/27'),
            ec2.Port.tcp(5439),
            'QuickSight-IP')

        self.rs_security_group.add_ingress_rule(self.qs_security_group,
                                                ec2.Port.tcp(5439),
                                                'QuickSight-sg')

        # self.rs_security_group.add_egress_rule(
        #     self.rs_security_group,
        #     ec2.Port.all_tcp(),
        #     'Allow outbound for QuickSight'
        # )

        self.redshift_cluster = redshift.Cluster(
            self,
            "datasource-redshift",
            master_user=redshift.Login(
                master_username="******",
                master_password=self.redshift_secret.secret_value_from_json(
                    'password')),
            vpc=self.vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            security_groups=[self.rs_security_group])

        self.rds_secret = secrets.Secret(
            self,
            'rds-admin',
            secret_name='rds-admin',
            description="This secret holds the generated admin password for the RDS cluster",
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key='password',
                password_length=32,
                exclude_characters='"@\\\/',
                exclude_punctuation=True))

        self.rds_cluster = rds.DatabaseCluster(
            self,
            "datasource-rds",
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_9),
            instance_props={
                "vpc_subnets": {
                    "subnet_type": ec2.SubnetType.ISOLATED
                },
                "vpc": self.vpc
            },
            credentials=rds.Credentials.from_secret(self.rds_secret))

        self.rds_cluster.connections.allow_default_port_from(
            self.bastion, "EC2 Bastion access Aurora")

        self.rds_cluster.connections.allow_default_port_from(
            self.qs_security_group, "QuickSight-sg")

        self.rds_cluster.connections.allow_default_port_from(
            # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
            ec2.Peer.ipv4('52.23.63.224/27'),
            "QuickSight-IP")

        self.qs_security_group.add_ingress_rule(self.rs_security_group,
                                                ec2.Port.all_tcp(), 'AllTCP')

        for rds_group in self.rds_cluster.connections.security_groups:
            self.qs_security_group.add_ingress_rule(rds_group,
                                                    ec2.Port.all_tcp(),
                                                    'AllTCP')

        # self.qs_security_group.add_egress_rule(
        #     self.rs_security_group,
        #     ec2.Port.all_tcp(),
        #     'AllTCP'
        # )

        core.CfnOutput(self, "vpcId", value=self.vpc.vpc_id)
        core.CfnOutput(self, "redshiftUsername", value="admin")
        core.CfnOutput(self, "redshiftPassword", value="redshift-admin")
        core.CfnOutput(self,
                       "redshiftClusterId",
                       value=self.redshift_cluster.cluster_name)
        core.CfnOutput(self,
                       "redshiftHost",
                       value=self.redshift_cluster.cluster_endpoint.hostname)
        core.CfnOutput(self, "redshiftDB", value="dev")
        core.CfnOutput(self, "rdsUsername", value="administrator")
        core.CfnOutput(self, "rdsPassword", value="rds-admin")
        core.CfnOutput(self,
                       "rdsClusterId",
                       value=self.rds_cluster.cluster_identifier)
        core.CfnOutput(self, "namespace", value="default")
        core.CfnOutput(self, "version", value="1")
        core.CfnOutput(self,
                       "quicksightSecurityGroupId",
                       value=self.qs_security_group.security_group_id)
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "OctemberVPC",
            max_azs=2,
            #      subnet_configuration=[{
            #          "cidrMask": 24,
            #          "name": "Public",
            #          "subnetType": aws_ec2.SubnetType.PUBLIC,
            #        },
            #        {
            #          "cidrMask": 24,
            #          "name": "Private",
            #          "subnetType": aws_ec2.SubnetType.PRIVATE
            #        },
            #        {
            #          "cidrMask": 28,
            #          "name": "Isolated",
            #          "subnetType": aws_ec2.SubnetType.ISOLATED,
            #          "reserved": True
            #        }
            #      ],
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        dynamo_db_endpoint = vpc.add_gateway_endpoint(
            "DynamoDbEndpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="octember-bizcard-{region}-{account}".format(
                region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

        api = apigw.RestApi(
            self,
            "BizcardImageUploader",
            rest_api_name="BizcardImageUploader",
            description="This service serves uploading bizcard images into s3.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            binary_media_types=["image/png", "image/jpg"],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        rest_api_role = aws_iam.Role(
            self,
            "ApiGatewayRoleForS3",
            role_name="ApiGatewayRoleForS3FullAccess",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_responses = [
            apigw.IntegrationResponse(
                status_code="200",
                #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
                # The response parameters from the backend response that API Gateway sends to the method response.
                # Use the destination as the key and the source as the value:
                #  - The destination must be an existing response parameter in the MethodResponse property.
                #  - The source must be an existing method request parameter or a static value.
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                }),
            apigw.IntegrationResponse(status_code="400",
                                      selection_pattern="4\d{2}"),
            apigw.IntegrationResponse(status_code="500",
                                      selection_pattern="5\d{2}")
        ]

        list_objects_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses)

        get_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path='/',
            options=list_objects_integration_options)

        api.root.add_method(
            "GET",
            get_s3_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={'method.request.header.Content-Type': False})

        get_s3_folder_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
            # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
            # The source must be an existing method request parameter or a static value.
            request_parameters={
                "integration.request.path.bucket": "method.request.path.folder"
            })

        get_s3_folder_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}",
            options=get_s3_folder_integration_options)

        s3_folder = api.root.add_resource('{folder}')
        s3_folder.add_method(
            "GET",
            get_s3_folder_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True
            })

        get_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        get_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}/{object}",
            options=get_s3_item_integration_options)

        s3_item = s3_folder.add_resource('{item}')
        s3_item.add_method(
            "GET",
            get_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        put_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[
                apigw.IntegrationResponse(status_code="200"),
                apigw.IntegrationResponse(status_code="400",
                                          selection_pattern="4\d{2}"),
                apigw.IntegrationResponse(status_code="500",
                                          selection_pattern="5\d{2}")
            ],
            request_parameters={
                "integration.request.header.Content-Type":
                "method.request.header.Content-Type",
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        put_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{bucket}/{object}",
            options=put_s3_item_integration_options)

        s3_item.add_method(
            "PUT",
            put_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        ddb_table = dynamodb.Table(
            self,
            "BizcardImageMetaInfoDdbTable",
            table_name="OctemberBizcardImgMeta",
            partition_key=dynamodb.Attribute(
                name="image_id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PROVISIONED,
            read_capacity=15,
            write_capacity=5)

        img_kinesis_stream = kinesis.Stream(
            self, "BizcardImagePath", stream_name="octember-bizcard-image")

        # create lambda function
        trigger_textract_lambda_fn = _lambda.Function(
            self,
            "TriggerTextExtractorFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="TriggerTextExtractorFromImage",
            handler="trigger_text_extract_from_s3_image.lambda_handler",
            description="Trigger to extract text from an image in S3",
            code=_lambda.Code.asset(
                "./src/main/python/TriggerTextExtractFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[ddb_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem", "dax:Describe*", "dax:List*",
                "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan",
                "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem",
                "dax:UpdateItem"
            ])

        trigger_textract_lambda_fn.add_to_role_policy(
            ddb_table_rw_policy_statement)
        trigger_textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[img_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                                   suffix=".jpg")
        s3_event_source = S3EventSource(s3_bucket,
                                        events=[s3.EventType.OBJECT_CREATED],
                                        filters=[s3_event_filter])
        trigger_textract_lambda_fn.add_event_source(s3_event_source)

        #XXX: https://github.com/aws/aws-cdk/issues/2240
        # To avoid to create extra Lambda Functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a
        # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props
        log_group = aws_logs.LogGroup(
            self,
            "TriggerTextractLogGroup",
            log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(trigger_textract_lambda_fn)

        text_kinesis_stream = kinesis.Stream(
            self, "BizcardTextData", stream_name="octember-bizcard-txt")

        textract_lambda_fn = _lambda.Function(
            self,
            "GetTextFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="GetTextFromImage",
            handler="get_text_from_s3_image.lambda_handler",
            description="extract text from an image in S3",
            code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["textract:*"]))

        img_kinesis_event_source = KinesisEventSource(
            img_kinesis_stream,
            batch_size=100,
            starting_position=_lambda.StartingPosition.LATEST)
        textract_lambda_fn.add_event_source(img_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "GetTextFromImageLogGroup",
            log_group_name="/aws/lambda/GetTextFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(textract_lambda_fn)
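        # A minimal sketch (an assumption, not the repository's actual handler in
        # ./src/main/python/GetTextFromS3Image) of what this function could do with
        # the permissions granted above; 'bucket' and 'key' would come from the
        # consumed Kinesis record:
        #
        #   import boto3, json, os
        #   textract = boto3.client('textract')
        #   kinesis = boto3.client('kinesis')
        #   res = textract.detect_document_text(
        #       Document={'S3Object': {'Bucket': bucket, 'Name': key}})
        #   lines = [b['Text'] for b in res['Blocks'] if b['BlockType'] == 'LINE']
        #   kinesis.put_record(StreamName=os.environ['KINESIS_STREAM_NAME'],
        #                      Data=json.dumps({'s3_key': key, 'text': lines}),
        #                      PartitionKey=key)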

        sg_use_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard elasticsearch client',
            security_group_name='use-octember-bizcard-es')
        core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

        sg_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard elasticsearch',
            security_group_name='octember-bizcard-es')
        core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

        sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='octember-bizcard-es')
        sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='use-octember-bizcard-es')
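        # Security-group pattern used throughout this stack: clients that need to
        # reach the Elasticsearch domain attach the 'use-octember-bizcard-es' group,
        # and the domain's own group only admits traffic from that client group
        # (plus itself, for node-to-node communication).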

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='octember-bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        bastion_host.instance.add_security_group(sg_use_bizcard_es)

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            'BizcardSearch',
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name="octember-bizcard",
            elasticsearch_version="7.9",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(service="es",
                                    resource="domain",
                                    resource_name="octember-bizcard/*")
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_bizcard_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es')

        s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name")

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   s3_lib_bucket_name)
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-es-lib.zip"))

        redis_lib_layer = _lambda.LayerVersion(
            self,
            "RedisLib",
            layer_version_name="redis-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-redis-lib.zip"))

        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToElasticSearch",
            handler="upsert_bizcard_to_es.lambda_handler",
            description="Upsert bizcard text into elasticsearch",
            code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_bizcard_es],
            vpc=vpc)

        text_kinesis_event_source = KinesisEventSource(
            text_kinesis_stream,
            batch_size=99,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToESLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToElasticSearch",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)
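        # A rough sketch (an assumption; the actual handler lives in
        # ./src/main/python/UpsertBizcardToES and relies on the es-lib layer) of
        # indexing one record into the VPC-only domain via the ES_HOST / ES_INDEX /
        # ES_TYPE environment variables set above; 'doc_id' and 'doc' are placeholders:
        #
        #   from elasticsearch import Elasticsearch
        #   es = Elasticsearch(hosts=[{'host': os.environ['ES_HOST'], 'port': 443}],
        #                      use_ssl=True)
        #   es.index(index=os.environ['ES_INDEX'], doc_type=os.environ['ES_TYPE'],
        #            id=doc_id, body=doc)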

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:DescribeStream",
                                        "kinesis:GetShardIterator",
                                        "kinesis:GetRecords"
                                    ]))

        firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "BizcardTextToS3",
            delivery_stream_name="octember-bizcard-txt-to-s3",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": text_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat": "GZIP",
                "prefix": "bizcard-text/",
                "roleArn": firehose_role.role_arn
            })
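        # This delivery stream drains octember-bizcard-txt into the S3 bucket under
        # the 'bizcard-text/' prefix, GZIP-compressed, flushing whenever 1 MB is
        # buffered or 60 seconds elapse, and reports delivery errors to the
        # CloudWatch log group named in firehose_log_group_name.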

        sg_use_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache client',
            security_group_name='use-octember-bizcard-es-cache')
        core.Tags.of(sg_use_bizcard_es_cache).add(
            'Name', 'use-octember-bizcard-es-cache')

        sg_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache',
            security_group_name='octember-bizcard-es-cache')
        core.Tags.of(sg_bizcard_es_cache).add('Name',
                                              'octember-bizcard-es-cache')

        sg_bizcard_es_cache.add_ingress_rule(
            peer=sg_use_bizcard_es_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-es-cache')

        es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "QueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-es-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-es-cache')

        es_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardSearchQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-es-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-es-cache',
            vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id])

        #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
        es_query_cache.add_depends_on(es_query_cache_subnet_group)

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        bizcard_search_lambda_fn = _lambda.Function(
            self,
            "BizcardSearchServer",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardSearchProxy",
            handler="es_search_bizcard.lambda_handler",
            description="Proxy server to search bizcard text",
            code=_lambda.Code.asset("./src/main/python/SearchBizcard"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard',
                'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[es_lib_layer, redis_lib_layer],
            security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache],
            vpc=vpc)
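        # The search Lambda carries both client security groups so it can reach the
        # Elasticsearch domain and the Redis cache. A cache-aside sketch (an
        # assumption, not the actual es_search_bizcard handler; run_es_query is a
        # hypothetical helper):
        #
        #   import redis
        #   cache = redis.Redis(host=os.environ['ELASTICACHE_HOST'], port=6379)
        #   cached = cache.get(query)
        #   if cached is None:
        #       result = run_es_query(query)
        #       cache.setex(query, 600, json.dumps(result))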

        #XXX: create API Gateway + LambdaProxy
        search_api = apigw.LambdaRestApi(
            self,
            "BizcardSearchAPI",
            handler=bizcard_search_lambda_fn,
            proxy=False,
            rest_api_name="BizcardSearch",
            description="This service serves searching bizcard text.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_search = search_api.root.add_resource('search')
        bizcard_search.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])
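        # Once deployed, the search endpoint is served from the 'v1' stage, e.g.
        # GET https://{rest-api-id}.execute-api.{region}.amazonaws.com/v1/search.
        # The query-string parameter is defined by the Lambda handler, not by this
        # stack; the call below assumes it is named 'query':
        #
        #   import requests
        #   requests.get('https://<rest-api-id>.execute-api.<region>.amazonaws.com/v1/search',
        #                params={'query': 'john doe'}).json()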

        sg_use_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db client',
            security_group_name='use-octember-bizcard-neptune')
        core.Tags.of(sg_use_bizcard_graph_db).add(
            'Name', 'use-octember-bizcard-neptune')

        sg_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db',
            security_group_name='octember-bizcard-neptune')
        core.Tags.of(sg_bizcard_graph_db).add('Name',
                                              'octember-bizcard-neptune')

        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='octember-bizcard-neptune')
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_use_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='use-octember-bizcard-neptune')

        bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup(
            self,
            "NeptuneSubnetGroup",
            db_subnet_group_description=
            "subnet group for octember-bizcard-neptune",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            db_subnet_group_name='octember-bizcard-neptune')

        bizcard_graph_db = aws_neptune.CfnDBCluster(
            self,
            "BizcardGraphDB",
            availability_zones=vpc.availability_zones,
            db_subnet_group_name=bizcard_graph_db_subnet_group.
            db_subnet_group_name,
            db_cluster_identifier="octember-bizcard",
            backup_retention_period=1,
            preferred_backup_window="08:45-09:15",
            preferred_maintenance_window="sun:18:00-sun:18:30",
            vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id])
        bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group)

        bizcard_graph_db_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[0],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_instance.add_depends_on(bizcard_graph_db)

        bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBReplicaInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[-1],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard-replica",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db)
        bizcard_graph_db_replica_instance.add_depends_on(
            bizcard_graph_db_instance)

        gremlinpython_lib_layer = _lambda.LayerVersion(
            self,
            "GremlinPythonLib",
            layer_version_name="gremlinpython-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(
                s3_lib_bucket, "var/octember-gremlinpython-lib.zip"))

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        upsert_to_neptune_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToGraphDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToNeptune",
            handler="upsert_bizcard_to_graph_db.lambda_handler",
            description="Upsert bizcard into neptune",
            code=_lambda.Code.asset(
                "./src/main/python/UpsertBizcardToGraphDB"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port
            },
            timeout=core.Duration.minutes(5),
            layers=[gremlinpython_lib_layer],
            security_groups=[sg_use_bizcard_graph_db],
            vpc=vpc)

        upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToGraphDBLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToNeptune",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_neptune_lambda_fn)
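        # A hedged sketch (not the repository's actual code) of using the
        # gremlinpython layer against the NEPTUNE_ENDPOINT / NEPTUNE_PORT
        # environment variables set above; 'name' is illustrative:
        #
        #   from gremlin_python.process.anonymous_traversal import traversal
        #   from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
        #   endpoint = 'wss://{}:{}/gremlin'.format(os.environ['NEPTUNE_ENDPOINT'],
        #                                           os.environ['NEPTUNE_PORT'])
        #   conn = DriverRemoteConnection(endpoint, 'g')
        #   g = traversal().withRemote(conn)
        #   g.addV('person').property('name', name).next()
        #   conn.close()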

        sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache client',
            security_group_name='use-octember-bizcard-neptune-cache')
        core.Tags.of(sg_use_bizcard_neptune_cache).add(
            'Name', 'use-octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache',
            security_group_name='octember-bizcard-neptune-cache')
        core.Tags.of(sg_bizcard_neptune_cache).add(
            'Name', 'octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache.add_ingress_rule(
            peer=sg_use_bizcard_neptune_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-neptune-cache')

        recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "RecommQueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-neptune-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-neptune-cache')

        recomm_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardRecommQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-neptune-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-neptune-cache',
            vpc_security_group_ids=[
                sg_bizcard_neptune_cache.security_group_id
            ])

        recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group)

        bizcard_recomm_lambda_fn = _lambda.Function(
            self,
            "BizcardRecommender",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardRecommender",
            handler="neptune_recommend_bizcard.lambda_handler",
            description="This service serves PYMK(People You May Know).",
            code=_lambda.Code.asset("./src/main/python/RecommendBizcard"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port,
                'ELASTICACHE_HOST':
                recomm_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[gremlinpython_lib_layer, redis_lib_layer],
            security_groups=[
                sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache
            ],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        recomm_api = apigw.LambdaRestApi(
            self,
            "BizcardRecommendAPI",
            handler=bizcard_recomm_lambda_fn,
            proxy=False,
            rest_api_name="BizcardRecommend",
            description="This service serves PYMK(People You May Know).",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_recomm = recomm_api.root.add_resource('pymk')
        bizcard_recomm.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:s3:::aws-neptune-notebook",
                        "arn:aws:s3:::aws-neptune-notebook/*"
                    ],
                    "actions": ["s3:GetObject", "s3:ListBucket"]
                }))

        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*".
                        format(region=core.Aws.REGION,
                               account=core.Aws.ACCOUNT_ID,
                               cluster_id=bizcard_graph_db.
                               attr_cluster_resource_id)
                    ],
                    "actions": ["neptune-db:connect"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookForNeptuneWorkbenchRole',
            role_name='AWSNeptuneNotebookRole-OctemberBizcard',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={
                'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
            })

        neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

        neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(neptune_wb_lifecycle_content))

        neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'NeptuneWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
            on_start=[neptune_wb_lifecycle_config_prop])

        neptune_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'NeptuneWorkbench',
            instance_type='ml.t2.medium',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=neptune_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
            subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
Example #14
0
    def __init__(self, scope: core.Construct, construct_id: str, env, **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)
        
        # The code that defines your stack goes here
        if self.node.try_get_context("tags"):
            self.user_defined_tags = self.node.try_get_context("tags").split(' ')
        else:
            self.user_defined_tags = None

        vpc = ec2.Vpc(self, "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # this configuration creates 3 subnet groups across 2 AZs = 6 subnets.
            subnet_configuration=[ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PUBLIC,
                name="Public",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PRIVATE,
                name="Private",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.ISOLATED,
                name="DB",
                cidr_mask=24
            )
            ],
            nat_gateways=2
            )
        self.vpc = vpc


        # Route53
        int_zone = r53.PrivateHostedZone(self, r53_zone_name,
                                         zone_name = 'int.emqx',
                                         vpc = vpc
        )

        self.int_zone = int_zone

        # Define cfn parameters
        # ec2_type = CfnParameter(self, "ec2-instance-type",
        #     type="String", default="m5.2xlarge",
        #     description="Specify the instance type you want").value_as_string
        
        key_name = CfnParameter(self, "ssh key",
            type="String", default="key_ireland",
            description="Specify your SSH key").value_as_string

        sg = ec2.SecurityGroup(self, id = 'sg_int', vpc = vpc)
        self.sg = sg

        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH frm anywhere')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(1883), 'MQTT TCP Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8883), 'MQTT TCP/TLS Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(14567), 'MQTT Quic Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(18083), 'WEB UI')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4369), 'EMQX dist port 1')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4370), 'EMQX dist port 2')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8081), 'EMQX dashboard')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2379), 'etcd client port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2380), 'etcd peer port')

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(self, "Bastion",
                                       vpc=vpc,
                                       subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                       instance_name="BastionHostLinux",
                                       instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")
    
        # Create NLB
        nlb = elb.NetworkLoadBalancer(self, "emq-elb",
                                      vpc=vpc,
                                      internet_facing=False, 
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        r53.ARecord(self, "AliasRecord",
                    zone = int_zone,
                    record_name = loadbalancer_dnsname,
                    target = r53.RecordTarget.from_alias(r53_targets.LoadBalancerTarget(nlb))
                    )

        self.nlb = nlb

        listener = nlb.add_listener("port1883", port=1883)
        listenerTLS = nlb.add_listener("port8883", port=8883) # TLS, EMQX termination
        listenerQuic = nlb.add_listener("port14567", port=14567, protocol=elbv2.Protocol.UDP)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        # asg = autoscaling.AutoScalingGroup(self, "emq-asg",
        #                                    vpc=vpc,
        #                                    vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        #                                    instance_type=ec2.InstanceType(
        #                                        instance_type_identifier=ec2_type),
        #                                    machine_image=linux_ami,
        #                                    security_group = sg,
        #                                    key_name=key_name,
        #                                    user_data=ec2.UserData.custom(user_data),
        #                                    health_check=HealthCheck.elb(grace=Duration.seconds(60)),
        #                                    desired_capacity=3,
        #                                    min_capacity=2,
        #                                    max_capacity=4
        # )

        # if self.user_defined_tags:
        #     core.Tags.of(asg).add(*self.user_defined_tags)

        # # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # # We must manually modify the inbound rules of the newly created ASG security group to allow access
        # # from the NLB IPs only.
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(1883), "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(18083), "Allow NLB access WEB UI")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(8081), "Allow emqx cluster dashboard access")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from(bastion,
        #     ec2.Port.tcp(22), "Allow SSH from the bastion only")

        self.setup_emqx(numEmqx, vpc, int_zone, sg, key_name)

        listener.add_targets('ec2',
                             port=1883,
                             targets=
                                 [ target.InstanceTarget(x)
                                   for x in self.emqx_vms])
        # @todo we need ssl termination
        listenerUI.add_targets('ec2',
                               port=18083,
                               targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerQuic.add_targets('ec2',
                                 port=14567,
                                 protocol=elbv2.Protocol.UDP,
                                 targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerTLS.add_targets('ec2',
                                port=8883,
                                targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])
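        # Listener-to-target mapping on the NLB: 1883 -> EMQX MQTT (TCP),
        # 8883 -> MQTT over TLS, 14567/UDP -> MQTT over QUIC, and 80 -> the
        # EMQX dashboard on 18083.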

        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        #self.setup_monitoring()

        self.setup_etcd(vpc, int_zone, sg, key_name)
        self.setup_loadgen(numLg, vpc, int_zone, sg, key_name, nlb.load_balancer_dns_name)

        self.setup_monitoring()

        core.CfnOutput(self, "Output",
            value=nlb.load_balancer_dns_name)
        core.CfnOutput(self, "SSH Entrypoint",
                       value=bastion.instance_public_ip)
        core.CfnOutput(self, "SSH cmds",
                       value="ssh -A -l ec2-user %s -L8888:%s:80 -L 9999:%s:80 -L 13000:%s:3000"
                       % (bastion.instance_public_ip, nlb.load_balancer_dns_name, self.mon_lb, self.mon_lb)
        )
Example #15
0
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # The code that defines your stack goes here

        vpc = ec2.Vpc(
            self,
            "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # this configuration creates 3 subnet groups across 2 AZs = 6 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="DB",
                                        cidr_mask=24)
            ],
            nat_gateways=2)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string
        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        user_defined_tags = self.node.try_get_context("tags")

        if user_defined_tags:
            tags = user_defined_tags.split(' ')
            core.Tags.of(asg).add(*tags)

        # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # We must manually modify the inbound rules of the newly created ASG security group to allow access
        # from the NLB IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(18083),
                                            "Allow NLB access WEB UI")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")

        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")
        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        # @todo we need ssl termination
        # listenerUI.add_targets("addTargetGroup",
        #     port=18083,
        #     targets=[asg])
        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
Example #16
0
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # vpc_name = self.node.try_get_context("vpc_name")
        # vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
        #   is_default=True,
        #   vpc_name=vpc_name)
        vpc = aws_ec2.Vpc(
            self,
            "EKKStackVPC",
            max_azs=2,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceClass.html
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceSize.html#aws_cdk.aws_ec2.InstanceSize
        ec2_instance_type = aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM)

        sg_bastion_host = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for a bastion host',
            security_group_name='bastion-host-sg')
        cdk.Tags.of(sg_bastion_host).add('Name', 'bastion-host-sg')

        #XXX: As there are no SSH public keys deployed on this machine,
        # you need to use EC2 Instance Connect with the command
        #  'aws ec2-instance-connect send-ssh-public-key' to provide your SSH public key.
        # https://aws.amazon.com/de/blogs/compute/new-using-amazon-ec2-instance-connect-for-ssh-access-to-your-ec2-instances/
        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=ec2_instance_type,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=sg_bastion_host)

        #TODO: SHOULD restrict the IP range allowed SSH access
        bastion_host.allow_ssh_access_from(aws_ec2.Peer.ipv4("0.0.0.0/0"))
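        # Example (values are placeholders) of pushing a temporary SSH key with
        # EC2 Instance Connect, as described in the note above:
        #
        #   aws ec2-instance-connect send-ssh-public-key \
        #       --instance-id <bastion-instance-id> \
        #       --availability-zone <az> \
        #       --instance-os-user ec2-user \
        #       --ssh-public-key file://my_key.pub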

        sg_use_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch client',
            security_group_name='use-es-cluster-sg')
        cdk.Tags.of(sg_use_es).add('Name', 'use-es-cluster-sg')

        sg_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch cluster',
            security_group_name='es-cluster-sg')
        cdk.Tags.of(sg_es).add('Name', 'es-cluster-sg')

        sg_es.add_ingress_rule(peer=sg_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_use_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='use-es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_bastion_host,
                               connection=aws_ec2.Port.all_tcp(),
                               description='bastion-host-sg')

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        ES_DOMAIN_NAME = self.node.try_get_context("es_domain_name")
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            "ElasticSearch",
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name=ES_DOMAIN_NAME,
            elasticsearch_version="7.10",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(
                        service="es",
                        resource="domain",
                        resource_name="{}/*".format(ES_DOMAIN_NAME))
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids
            })
        cdk.Tags.of(es_cfn_domain).add('Name', ES_DOMAIN_NAME)

        S3_BUCKET_SUFFIX = ''.join(
            random.sample((string.ascii_lowercase + string.digits), k=7))
        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            removal_policy=cdk.RemovalPolicy.
            DESTROY,  #XXX: Default: cdk.RemovalPolicy.RETAIN - The bucket will be orphaned
            bucket_name="ekk-stack-{region}-{suffix}".format(
                region=cdk.Aws.REGION, suffix=S3_BUCKET_SUFFIX))

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=["*"],
                actions=[
                    "ec2:DescribeVpcs", "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface"
                ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[
                                        es_cfn_domain.attr_arn,
                                        "{}/*".format(es_cfn_domain.attr_arn)
                                    ],
                                    actions=[
                                        "es:DescribeElasticsearchDomain",
                                        "es:DescribeElasticsearchDomains",
                                        "es:DescribeElasticsearchDomainConfig",
                                        "es:ESHttpPost", "es:ESHttpPut"
                                    ]))

        ES_INDEX_NAME = self.node.try_get_context("es_index_name")

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    es_cfn_domain.attr_arn,
                    "{}/*".format(es_cfn_domain.attr_arn)
                ],
                # resources=[
                #   "{aes_arn}/_all/_settings".format(aes_arn=es_cfn_domain.attr_arn),
                #   "{aes_arn}/_cluster/stats".format(aes_arn=es_cfn_domain.attr_arn),
                #   "{aes_arn}/{es_index_name}*/_mapping".format(aes_arn=es_cfn_domain.attr_arn, es_index_name=ES_INDEX_NAME),
                #   "{aes_arn}/_nodes".format(aes_arn=es_cfn_domain.attr_arn),
                #   "{aes_arn}/_nodes/*/stats".format(aes_arn=es_cfn_domain.attr_arn),
                #   "{aes_arn}/_stats".format(aes_arn=es_cfn_domain.attr_arn),
                #   "{aes_arn}/{es_index_name}*/_stats".format(aes_arn=es_cfn_domain.attr_arn, es_index_name=ES_INDEX_NAME)
                # ],
                actions=["es:ESHttpGet"]))

        firehose_log_group_name = "/aws/kinesisfirehose/{}".format(
            ES_INDEX_NAME)
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                resources=[
                    self.format_arn(
                        service="logs",
                        resource="log-group",
                        resource_name="{}:log-stream:*".format(
                            firehose_log_group_name),
                        arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "KinesisFirehoseServiceRole",
            role_name="KinesisFirehoseServiceRole-{es_index}-{region}".format(
                es_index=ES_INDEX_NAME, region=cdk.Aws.REGION),
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        es_dest_vpc_config = aws_kinesisfirehose.CfnDeliveryStream.VpcConfigurationProperty(
            role_arn=firehose_role.role_arn,
            security_group_ids=[sg_use_es.security_group_id],
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids)

        es_dest_config = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchDestinationConfigurationProperty(
            index_name=ES_INDEX_NAME,
            role_arn=firehose_role.role_arn,
            s3_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Backup"
                },
                "compressionFormat":
                "UNCOMPRESSED",  # [GZIP | HADOOP_SNAPPY | Snappy | UNCOMPRESSED | ZIP]
                # Kinesis Data Firehose automatically appends the “YYYY/MM/dd/HH/” UTC prefix to delivered S3 files. You can also specify
                # an extra prefix in front of the time format and add "/" to the end to have it appear as a folder in the S3 console.
                "prefix": "{}/".format(ES_INDEX_NAME),
                "roleArn": firehose_role.role_arn
            },
            buffering_hints={
                "intervalInSeconds": 60,
                "sizeInMBs": 1
            },
            cloud_watch_logging_options={
                "enabled": True,
                "logGroupName": firehose_log_group_name,
                "logStreamName": "ElasticsearchDelivery"
            },
            domain_arn=es_cfn_domain.attr_arn,
            index_rotation_period=
            "NoRotation",  # [NoRotation | OneDay | OneHour | OneMonth | OneWeek]
            retry_options={"durationInSeconds": 60},
            s3_backup_mode=
            "FailedDocumentsOnly",  # [AllDocuments | FailedDocumentsOnly]
            vpc_configuration=es_dest_vpc_config)

        firehose_to_es_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "KinesisFirehoseToES",
            delivery_stream_name=ES_INDEX_NAME,
            delivery_stream_type="DirectPut",
            elasticsearch_destination_configuration=es_dest_config,
            tags=[{
                "key": "Name",
                "value": ES_DOMAIN_NAME
            }])
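        # A quick way to smoke-test the DirectPut stream after deployment is to push a
        # hand-written record; this is only a sketch (assumes boto3, the payload is made up):
        #   import boto3, json
        #   firehose = boto3.client("firehose")
        #   firehose.put_record(
        #       DeliveryStreamName=ES_INDEX_NAME,
        #       Record={"Data": (json.dumps({"message": "hello"}) + "\n").encode("utf-8")})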
Example #17
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "ElasticsearchHolVPC",
            max_azs=2,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceClass.html
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceSize.html#aws_cdk.aws_ec2.InstanceSize
        ec2_instance_type = aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM)

        sg_bastion_host = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for a bastion host',
            security_group_name='bastion-host-sg')
        cdk.Tags.of(sg_bastion_host).add('Name', 'bastion-host-sg')

        #XXX: As there are no SSH public keys deployed on this machine,
        # you need to use EC2 Instance Connect with the command
        #  'aws ec2-instance-connect send-ssh-public-key' to provide your SSH public key.
        # https://aws.amazon.com/de/blogs/compute/new-using-amazon-ec2-instance-connect-for-ssh-access-to-your-ec2-instances/
        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=ec2_instance_type,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=sg_bastion_host)

        #TODO: SHOULD restrict the IP range that is allowed SSH access
        bastion_host.allow_ssh_access_from(aws_ec2.Peer.ipv4("0.0.0.0/0"))
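        # Rough connection sketch (all values below are placeholders): push a temporary key
        # with EC2 Instance Connect as described above, then SSH to the bastion's public DNS name.
        #   aws ec2-instance-connect send-ssh-public-key \
        #       --instance-id i-0123456789abcdef0 \
        #       --availability-zone us-east-1a \
        #       --instance-os-user ec2-user \
        #       --ssh-public-key file://~/.ssh/id_rsa.pub
        #   ssh -i ~/.ssh/id_rsa ec2-user@<bastion-public-dns>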

        sg_use_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch client',
            security_group_name='use-es-cluster-sg')
        cdk.Tags.of(sg_use_es).add('Name', 'use-es-cluster-sg')

        sg_es = aws_ec2.SecurityGroup(
            self,
            "ElasticSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an elasticsearch cluster',
            security_group_name='es-cluster-sg')
        cdk.Tags.of(sg_es).add('Name', 'es-cluster-sg')

        sg_es.add_ingress_rule(peer=sg_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_use_es,
                               connection=aws_ec2.Port.all_tcp(),
                               description='use-es-cluster-sg')
        sg_es.add_ingress_rule(peer=sg_bastion_host,
                               connection=aws_ec2.Port.all_tcp(),
                               description='bastion-host-sg')

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_domain_name = 'es-hol'
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            "ElasticSearch",
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name=es_domain_name,
            elasticsearch_version="7.7",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(
                        service="es",
                        resource="domain",
                        resource_name="{}/*".format(es_domain_name))
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids
            })
        cdk.Tags.of(es_cfn_domain).add('Name', 'es-hol')

        cdk.CfnOutput(self,
                      'BastionHostId',
                      value=bastion_host.instance_id,
                      export_name='BastionHostId')
        cdk.CfnOutput(self,
                      'BastionHostPublicDNSName',
                      value=bastion_host.instance_public_dns_name,
                      export_name='BastionHostPublicDNSName')
        cdk.CfnOutput(self,
                      'ESDomainEndpoint',
                      value=es_cfn_domain.attr_domain_endpoint,
                      export_name='ESDomainEndpoint')
        cdk.CfnOutput(
            self,
            'ESDashboardsURL',
            value=f"{es_cfn_domain.attr_domain_endpoint}/_dashboards/",
            export_name='ESDashboardsURL')
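        # The domain is only reachable from inside the VPC, so the dashboards URL above cannot
        # be opened directly from the internet; one option (sketch, placeholder key/host values)
        # is an SSH tunnel through the bastion after pushing a key via EC2 Instance Connect:
        #   ssh -i ~/.ssh/id_rsa -N -L 9200:<es-domain-endpoint>:443 ec2-user@<bastion-public-dns>
        # and then browsing https://localhost:9200/_dashboards/ locally.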
Example #18
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Code from zhxinyua that creates a VPC, S3 gateway endpoint, bastion host, EC2 instance, an extra EBS volume, a CloudWatch Events rule to stop the EC2 instances, and AWS Backup for EC2

        # create a new VPC
        vpc_new = aws_ec2.Vpc(self, "VpcFromCDK", cidr="10.0.0.0/16")
        vpc_new.add_gateway_endpoint(
            "S3Endpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.S3,
            # Add the endpoint only to the PUBLIC subnets
            subnets=[
                aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC)
            ])

        # only allow a specific range of IPs to connect to the bastion
        # BastionHostLinux supports two ways to connect: one is SSM, the other is EC2 Instance Connect
        # EC2 Instance Connect is not supported in the CN (China) regions
        host_bastion = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc_new,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))

        # write your own IP range to access this bastion instead of 1.2.3.4/32
        host_bastion.allow_ssh_access_from(aws_ec2.Peer.ipv4("1.2.3.4/32"))

        # use Amazon Linux as the OS
        amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM,
            storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # security group
        my_security_group = aws_ec2.SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc_new,
            description="SecurityGroup from CDK",
            security_group_name="CDK SecurityGroup",
            allow_all_outbound=True,
        )

        my_security_group.add_ingress_rule(aws_ec2.Peer.ipv4('10.0.0.0/16'),
                                           aws_ec2.Port.tcp(22),
                                           "allow ssh access from the VPC")

        # set up an web instance in public subnet
        work_server = aws_ec2.Instance(
            self,
            "WebInstance",
            instance_type=aws_ec2.InstanceType("Write a EC2 instance type"),
            machine_image=amzn_linux,
            vpc=vpc_new,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=my_security_group,
            key_name="Your SSH key pair name")

        # allow web connect
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(80),
                                                    "allow http from world")
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(443),
                                                    "allow https from world")

        # set a second ebs to web instance
        work_server.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])

        # CloudWatch Events rule to stop the instances every day at 15:00 UTC
        # The AwsApi target calls the AWS API through the JavaScript SDK, so service/action names follow that SDK
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_events_targets/AwsApi.html
        stop_EC2 = AwsApi(
            service="EC2",
            action="stopInstances",
            parameters={
                "InstanceIds":
                [work_server.instance_id, host_bastion.instance_id]
            })

        Rule(self,
             "ScheduleRule",
             schedule=Schedule.cron(minute="0", hour="15"),
             targets=[stop_EC2])
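        # A matching start-of-day rule is not part of this stack, but a sketch using the same
        # AwsApi target could look like this (the 07:00 UTC schedule is an assumption):
        # start_EC2 = AwsApi(
        #     service="EC2",
        #     action="startInstances",
        #     parameters={
        #         "InstanceIds":
        #         [work_server.instance_id, host_bastion.instance_id]
        #     })
        # Rule(self,
        #      "StartScheduleRule",
        #      schedule=Schedule.cron(minute="0", hour="7"),
        #      targets=[start_EC2])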

        # AWS backup part
        # create a BackupVault
        vault = backup.BackupVault(self,
                                   "BackupVault",
                                   backup_vault_name="CDK_Backup_Vault")

        # create a BackupPlan
        plan = backup.BackupPlan(self,
                                 "AWS-Backup-Plan",
                                 backup_plan_name="CDK_Backup")

        # add backup resources, using a different selection method for each of the two resources
        plan.add_selection(
            "Selection",
            resources=[
                backup.BackupResource.from_ec2_instance(work_server),
                backup.BackupResource.from_tag("Name", "BastionHost")
            ])

        # define the backup rule details
        plan.add_rule(
            backup.BackupPlanRule(
                backup_vault=vault,
                rule_name="CDK_Backup_Rule",
                schedule_expression=Schedule.cron(minute="0",
                                                  hour="16",
                                                  day="1",
                                                  month="1-12"),
                delete_after=Duration.days(130),
                move_to_cold_storage_after=Duration.days(10)))

        # output information after deploy
        output = CfnOutput(self,
                           "BastionHost_information",
                           value=host_bastion.instance_public_ip,
                           description="BastionHost's Public IP")
        output = CfnOutput(self,
                           "WebHost_information",
                           value=work_server.instance_public_ip,
                           description="Web server's Public IP")
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self, "vpc",
            cidr=props['vpc_CIDR'],
            max_azs=3,
            subnet_configuration=[
                {
                    'cidrMask': 28,
                    'name': 'public',
                    'subnetType': ec2.SubnetType.PUBLIC
                },
                {
                    'cidrMask': 28,
                    'name': 'private',
                    'subnetType': ec2.SubnetType.PRIVATE
                },
                {
                    'cidrMask': 28,
                    'name': 'db',
                    'subnetType': ec2.SubnetType.ISOLATED
                }
            ]
        )

        rds_subnetGroup = rds.SubnetGroup(self, "rds_subnetGroup",
            description = f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc = vpc,
            vpc_subnets = ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED)
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(self,'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2
            ),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props['rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])
            ),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(
                retention=core.Duration.days(props['rds_automated_backup_retention_days'])
            )
        )

        EcsToRdsSeurityGroup= ec2.SecurityGroup(self, "EcsToRdsSeurityGroup",
            vpc = vpc,
            description = "Allow WordPress containers to talk to RDS"
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self, 'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED),        #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            }
        )
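        # The 'lambda' asset containing db_creds_generator.handler is not shown here; a minimal
        # sketch of such a handler, assuming it only reads the master secret referenced by
        # SECRET_NAME (note it would also need secretsmanager:GetSecretValue permission,
        # which this snippet does not grant):
        #   import os, json, boto3
        #   def handler(event, context):
        #       value = boto3.client("secretsmanager").get_secret_value(
        #           SecretId=os.environ["SECRET_NAME"])
        #       creds = json.loads(value["SecretString"])
        #       # ...use creds to provision application database users...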

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))   #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(self, "MyEfsFileSystem",
            vpc = vpc,
            encrypted=True, # file system is not encrypted by default
            lifecycle_policy = props['efs_lifecycle_policy'],
            performance_mode = efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode = efs.ThroughputMode.BURSTING,
            removal_policy = core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups = props['efs_automatic_backups']
        )

        if props['deploy_bastion_host']:
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host',
                vpc = vpc
            )
            rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr=props['vpc_CIDR'],
                      max_azs=3,
                      subnet_configuration=[{
                          'cidrMask': 28,
                          'name': 'public',
                          'subnetType': ec2.SubnetType.PUBLIC
                      }, {
                          'cidrMask':
                          28,
                          'name':
                          'private',
                          'subnetType':
                          ec2.SubnetType.PRIVATE
                      }, {
                          'cidrMask':
                          28,
                          'name':
                          'db',
                          'subnetType':
                          ec2.SubnetType.ISOLATED
                      }])

        rds_subnetGroup = rds.SubnetGroup(
            self,
            "rds_subnetGroup",
            description=
            f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(
            self,
            'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props[
                    'rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(
                    instance_type_identifier=props['rds_instance_type'])),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(retention=core.Duration.days(
                props['rds_automated_backup_retention_days'])))

        EcsToRdsSeurityGroup = ec2.SecurityGroup(
            self,
            "EcsToRdsSeurityGroup",
            vpc=vpc,
            description="Allow WordPress containers to talk to RDS")

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self,
            'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda/db_creds_generator'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED
            ),  #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            })

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(
            EcsToRdsSeurityGroup,
            ec2.Port.tcp(3306))  #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,  # file system is not encrypted by default
            lifecycle_policy=props['efs_lifecycle_policy'],
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups=props['efs_automatic_backups'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=vpc,
            container_insights=props['ecs_enable_container_insights'])

        if props['deploy_bastion_host']:
            #ToDo: Deploy bastion host with a key file
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
            rds_instance.connections.allow_from(bastion_host,
                                                ec2.Port.tcp(3306))

            #######################
            ### Developer Tools ###
            # SFTP into the EFS Shared File System

            NetToolsSecret = secretsmanager.Secret(
                self,
                "NetToolsSecret",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({
                        "username": '******',
                        "ip": ''
                    }),
                    generate_string_key="password",
                    exclude_characters='/"'))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
            AccessPoint = file_system.add_access_point(
                "access-point",
                path="/",
                create_acl=efs.Acl(
                    owner_uid=
                    "100",  #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                    owner_gid="101",
                    permissions="0755"))

            EfsVolume = ecs.Volume(
                name="efs",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=file_system.file_system_id,
                    transit_encryption="ENABLED",
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=AccessPoint.access_point_id)))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
            NetToolsTask = ecs.FargateTaskDefinition(self,
                                                     "TaskDefinition",
                                                     cpu=256,
                                                     memory_limit_mib=512,
                                                     volumes=[EfsVolume])

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
            NetToolsContainer = NetToolsTask.add_container(
                "NetTools",
                image=ecs.ContainerImage.from_registry('netresearch/sftp'),
                command=['test:test:100:101:efs'])
            NetToolsContainer.add_port_mappings(
                ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

            NetToolsContainer.add_mount_points(
                ecs.MountPoint(
                    container_path=
                    "/home/test/efs",  #ToDo build path out with username from secret
                    read_only=False,
                    source_volume=EfsVolume.name,
                ))
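            # Usage note (sketch): with the netresearch/sftp image the 'test:test:100:101:efs'
            # command above is read as user:password:uid:gid:directory, so once the task has an
            # IP you could reach the EFS share with something like:
            #   sftp test@<task-ip>    # password 'test', files land under /home/test/efs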

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
            service = ecs.FargateService(
                self,
                "Service",
                cluster=cluster,
                task_definition=NetToolsTask,
                platform_version=ecs.FargatePlatformVersion(
                    "VERSION1_4"),  #Required for EFS
            )
            #ToDo somehow store container's IP on deploy

            #Allow traffic to EFS Volume from Net Tools container
            service.connections.allow_to(file_system, ec2.Port.tcp(2049))
            #ToDo allow bastion host into container on port 22

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
            bastion_ip_locator = _lambda.Function(
                self,
                'bastion_ip_locator',
                function_name=
                f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler='bastion_ip_locator.handler',
                code=_lambda.Code.asset('lambda/bastion_ip_locator'),
                environment={
                    'CLUSTER_NAME': cluster.cluster_arn,
                    'SERVICE_NAME': service.service_name
                })

            #Give needed perms to bastion_ip_locator for reading info from ECS
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:DescribeTasks"],
                    resources=[
                        #f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                        f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
                    ]))
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(actions=[
                    "ecs:ListTasks",
                ],
                                    resources=["*"],
                                    conditions={
                                        'ArnEquals': {
                                            'ecs:cluster': cluster.cluster_arn
                                        }
                                    }))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
        self.output_props["cluster"] = cluster
Example #21
    def __init__(self, scope: core.Construct, id: str, vpc, env,
                 **kwargs) -> None:
        super().__init__(scope, id, env=env, **kwargs)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # You must manually adjust the inbound rules of the newly created ASG security group to allow
        # access from the NLB IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")

        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")

        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
Example #22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            id="VPC",
            cidr="10.0.0.0/16",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(name="DB",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.ISOLATED),
                # ec2.SubnetConfiguration(
                #     name="DB2", cidr_mask=24,
                #     reserved=False, subnet_type=ec2.SubnetType.ISOLATED
                # )
            ],
            enable_dns_hostnames=True,
            enable_dns_support=True)

        core.Tag(key="Application", value=self.stack_name) \
            .add(self.vpc, key="Application", value=self.stack_name)
        # core.Tag("Network", "Public").add(vpc)
        # core.Tag("Name", "VPCName-Environment").add(vpc)
        # core.Tag("Environment", "Environment").add(vpc)

        bastion = ec2.BastionHostLinux(
            self,
            id="BastionHost",
            vpc=self.vpc,
            instance_name="BastionHost",
            instance_type=ec2.InstanceType(ec2_type),
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))
        bastion.allow_ssh_access_from(ec2.Peer.any_ipv4())

        # Setup key_name for EC2 instance login if you don't use Session Manager
        #bastion.instance.instance.add_property_override("KeyName", key_name)

        ec2.CfnEIP(self,
                   id="BastionHostEIP",
                   domain="vpc",
                   instance_id=bastion.instance_id)

        core.CfnOutput(
            self,
            id="VPCId",
            value=self.vpc.vpc_id,
            description="VPC ID",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:vpc-id"
        )

        core.CfnOutput(
            self,
            id="BastionPrivateIP",
            value=bastion.instance_private_ip,
            description="BASTION Private IP",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:bastion-private-ip"
        )

        core.CfnOutput(
            self,
            id="BastionPublicIP",
            value=bastion.instance_public_ip,
            description="BASTION Public IP",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:bastion-public-ip"
        )
Example #23
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create Bastion
        bastion = ec2.BastionHostLinux(
            self,
            "myBastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="myBastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        # Setup key_name for EC2 instance login if you don't use Session Manager
        # bastion.instance.instance.add_property_override("KeyName", key_name)

        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create ALB
        alb = elb.ApplicationLoadBalancer(self,
                                          "myALB",
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name="myALB")
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)

        # Create Autoscaling Group with fixed 2*EC2 hosts
        self.asg = autoscaling.AutoScalingGroup(
            self,
            "myASG",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=2,
            # block_devices=[
            #     autoscaling.BlockDevice(
            #         device_name="/dev/xvda",
            #         volume=autoscaling.BlockDeviceVolume.ebs(
            #             volume_type=autoscaling.EbsDeviceVolumeType.GP2,
            #             volume_size=12,
            #             delete_on_termination=True
            #         )),
            #     autoscaling.BlockDevice(
            #         device_name="/dev/sdb",
            #         volume=autoscaling.BlockDeviceVolume.ebs(
            #             volume_size=20)
            #         # 20GB, with default volume_type gp2
            #     )
            # ]
        )

        self.asg.connections.allow_from(
            alb, ec2.Port.tcp(80),
            "ALB access 80 port of EC2 in Autoscaling Group")
        listener.add_targets("addTargetGroup", port=80, targets=[self.asg])

        core.CfnOutput(self, "Output", value=alb.load_balancer_dns_name)
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # s3
        s3_bucket_name = "{}-s3-{}".format(Constant.PROJECT_NAME,
                                           self._get_UUID(4))
        _s3.Bucket(
            self,
            id=s3_bucket_name,
            bucket_name=s3_bucket_name,
            removal_policy=core.RemovalPolicy.
            DESTROY,  #TODO:  destroy for test
            # removal_policy=core.RemovalPolicy.RETAIN
        )

        # step 1. VPC
        # To build the environment in an existing VPC, use the line below instead and pass in the vpc_id
        # vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id='')
        vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,  # two AZs, with one subnet per group created in each AZ
            cidr="10.10.0.0/16",
            # this configuration creates 2 subnet groups across 2 AZs = 4 subnets (a third ISOLATED group is commented out below).
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                # ec2.SubnetConfiguration(
                #     subnet_type=ec2.SubnetType.ISOLATED,
                #     name="DB",
                #     cidr_mask=24
                # )
            ],
            # nat_gateway_provider=ec2.NatProvider.gateway(),
            # nat_gateways=2,
        )

        # ES needs to be deployed into the private subnets
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

        # step 2. iam_instance_profile needed to access S3 and the ES cluster
        #  action -> statement -> policy -> role -> instance profile -> attach to ec2

        actions = [
            "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface",
            "ec2:DescribeNetworkInterfaces",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets",
            "ec2:DescribeVpcs", "s3:*"
        ]

        policyStatement = PolicyStatement(actions=actions, effect=Effect.ALLOW)
        policyStatement.add_all_resources()
        policyStatement.sid = "Stmt1480452973134"

        policy_name = "{}-ec2-es-policy".format(Constant.PROJECT_NAME)
        ec2_policy = Policy(self, policy_name, policy_name=policy_name)

        ec2_policy.add_statements(policyStatement)

        role_name = "{}-ec2-es-role".format(Constant.PROJECT_NAME)
        access_es_role = Role(
            self,
            role_name,
            role_name=role_name,
            assumed_by=ServicePrincipal('ec2.amazonaws.com.cn'))

        ec2_policy.attach_to_role(access_es_role)

        profile_name = "{}-ec2-es-profile".format(Constant.PROJECT_NAME)
        instance_profile = CfnInstanceProfile(
            self,
            profile_name,
            instance_profile_name=profile_name,
            roles=[access_es_role.role_name])

        # step 4. ES

        # In production, configure the security group to accept only port 443 requests from inside the VPC
        sg_es_cluster_name = "{}-sg-es".format(Constant.PROJECT_NAME)
        sg_es_cluster = ec2.SecurityGroup(
            self,
            id=sg_es_cluster_name,
            vpc=vpc,
            security_group_name=sg_es_cluster_name)

        sg_es_cluster.add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                       connection=ec2.Port.tcp(443))

        es_name = Constant.PROJECT_NAME
        es_arn = self.format_arn(service="es",
                                 resource="domain",
                                 sep="/",
                                 resource_name=es_name)
        es = elasticsearch.CfnDomain(
            self,
            es_name,
            elasticsearch_version='7.1',
            domain_name=es_name,
            node_to_node_encryption_options={"enabled": False},
            vpc_options={
                "securityGroupIds": [sg_es_cluster.security_group_id
                                     ],  # in production, restrict the security group to accept only port 443 from inside the VPC
                # If multiple nodes are enabled, multiple subnets must be configured; this test runs a single ES node, so only one subnet is used
                "subnetIds": selection.subnet_ids[:1]
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            elasticsearch_cluster_config={
                # In production, enable the three dedicated master nodes below
                # "dedicatedMasterCount": 3,
                # "dedicatedMasterEnabled": True,
                # "dedicatedMasterType": 'm4.large.elasticsearch',
                "instanceCount": 1,
                "instanceType": 'm4.large.elasticsearch',
                "zoneAwarenessEnabled": False
            })
        es.access_policies = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "AWS": "*"
                },
                "Action": "es:*",
                "Resource": "{}/*".format(es_arn)
            }]
        }

        # step 5.  SNS
        topic = sns.Topic(self, "topic")
        topic.add_subscription(subs.EmailSubscription(Constant.EMAIL_ADDRESS))

        # Set up an SNS interface endpoint so the Lambda can reach SNS from inside the VPC
        vpc.add_interface_endpoint(
            "SNSEndpoint", service=ec2.InterfaceVpcEndpointAwsService.SNS)

        # step 6. Lambda
        lambdaFn = lambda_.Function(self,
                                    "Singleton",
                                    code=lambda_.Code.asset('lambda'),
                                    handler='hello.handler',
                                    vpc=vpc,
                                    vpc_subnets=ec2.SubnetSelection(
                                        subnet_type=ec2.SubnetType.PRIVATE),
                                    timeout=core.Duration.seconds(300),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    environment={
                                        'SNS_TOPIC_ARN': topic.topic_arn,
                                        'ES_ENDPOINT': es.attr_domain_endpoint,
                                        'ES_INDEX_NAME': Constant.ES_INDEX_NAME
                                    })

        # step 7. Cloud watch event
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0/5',
                                          hour='*',
                                          month='*',
                                          week_day='*',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))

        # Grant the Lambda permission to publish to the SNS topic
        topic.grant_publish(lambdaFn)
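        # The 'lambda' asset with hello.handler is not shown; a rough sketch of what it might do
        # with the environment configured above (the message format is an assumption):
        #   import os, json, boto3, urllib.request
        #   def handler(event, context):
        #       url = "https://{}/{}/_count".format(os.environ["ES_ENDPOINT"],
        #                                           os.environ["ES_INDEX_NAME"])
        #       count = json.load(urllib.request.urlopen(url))["count"]
        #       boto3.client("sns").publish(TopicArn=os.environ["SNS_TOPIC_ARN"],
        #                                   Message="log count: {}".format(count))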

        # Create ALB
        alb_name = "{}-alb".format(Constant.PROJECT_NAME)
        alb = elb.ApplicationLoadBalancer(self,
                                          alb_name,
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name=alb_name)
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)

        # Create Autoscaling Group with fixed 2*EC2 hosts

        user_data = user_data_content.format(es.attr_domain_endpoint,
                                             Constant.REGION_NAME,
                                             Constant.ES_LOG_PATH,
                                             Constant.ES_INDEX_NAME,
                                             s3_bucket_name)

        # step 3. Create the bastion host

        bastion_name = "{}-bastion".format(Constant.PROJECT_NAME)
        bastion = ec2.BastionHostLinux(
            self,
            bastion_name,
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name=bastion_name,
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"))
        bastion.instance.instance.add_property_override(
            "KeyName", Constant.EC2_KEY_NAME)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")  # 生成环境可以限定IP allow_from
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(8080),
                                                "Internet access HTTP")  # 测试需要
        # bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(443), "Internet access HTTPS")  # 测试需要

        bastion.instance.instance.iam_instance_profile = instance_profile.instance_profile_name  # attach the instance profile to the EC2 instance (acts as its role)
        bastion.instance.instance.image_id = ami_map.get(
            Constant.REGION_NAME)  # specify the AMI ID

        # The bastion's user_data only runs once; to run it on every boot, see https://amazonaws-china.com/premiumsupport/knowledge-center/execute-user-data-ec2/?nc1=h_ls
        bastion_user_data = "/home/ec2-user/start.sh {}  {} '{}' {} {}".format(
            es.attr_domain_endpoint, Constant.REGION_NAME,
            Constant.ES_LOG_PATH, Constant.ES_INDEX_NAME, s3_bucket_name)
        bastion.instance.add_user_data(
            "date >> /home/ec2-user/root.txt")  # 查看启动脚本是否执行
        bastion.instance.add_user_data(bastion_user_data)

        asg_name = "{}-asg".format(Constant.PROJECT_NAME)
        asg = autoscaling.AutoScalingGroup(
            self,
            asg_name,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),  # PUBLIC for debug
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"),
            machine_image=my_ami,
            key_name=Constant.EC2_KEY_NAME,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=1,
            min_capacity=1,
            max_capacity=1,
            role=access_es_role)

        asg.connections.allow_from(
            alb, ec2.Port.tcp(8080),
            "ALB access 80 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(ec2.Port.tcp(8080), "Internet access HTTP for test")  # for testing
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                            "Internet access SSH")  # for debug
        listener.add_targets("addTargetGroup", port=8080, targets=[asg])

        core.CfnOutput(self,
                       "s3_bucket_name",
                       value=s3_bucket_name,
                       description='S3 bucket:  store web log')

        core.CfnOutput(self,
                       "ElasticSearchEndpointUrl",
                       value=es.attr_domain_endpoint,
                       description='Elastic Search Url')

        # Count the log documents in Elasticsearch; run this on the bastion for a quick check of the log volume.
        core.CfnOutput(self,
                       "CmdGetCountIndex",
                       value='curl https://{}/{}/_count'.format(
                           es.attr_domain_endpoint, Constant.ES_INDEX_NAME),
                       description='Count search result. ')

        # SSH login command for the bastion host; it can be copied and used directly
        core.CfnOutput(self,
                       "CmdSshToBastion",
                       value='ssh -i ~/{}.pem ec2-user@{}'.format(
                           Constant.EC2_KEY_NAME,
                           bastion.instance_public_dns_name),
                       description='cmd ssh to bastion')

        # Command to start the services on the bastion; after a reboot, run it to start the web service that ships logs to ES
        core.CfnOutput(
            self,
            "CmdSshBastionStartWeb",
            value='sudo {}'.format(bastion_user_data),
            description="Cmd to start web+logstash+filebeat service")

        # ALB access URL
        core.CfnOutput(self,
                       "UrlLoad_Balancer",
                       value='http://{}'.format(alb.load_balancer_dns_name),
                       description='ALB  url ')

        # Web URL of the bastion host; for easier debugging, the bastion uses the same AMI as the ASG instances.
        core.CfnOutput(self,
                       "UrlBastion",
                       value='http://{}:8080'.format(
                           bastion.instance_public_dns_name),
                       description="Bastion server web url ")

        # The command in the output below builds an SSH tunnel through the bastion to Elasticsearch so Kibana can be accessed locally.
        core.CfnOutput(
            self,
            "CmdSshProxyToKibana",
            value='ssh -i ~/{}.pem ec2-user@{}  -N -L 9200:{}:443'.format(
                Constant.EC2_KEY_NAME, bastion.instance_public_dns_name,
                es.attr_domain_endpoint),
            description="cmd: access kibana from bastion ssh. ")
        # After running the command above, open the link below in a browser
        core.CfnOutput(self,
                       "UrlKibana",
                       value='https://localhost:9200/_plugin/kibana/',
                       description="kibana url ")