Example No. 1
    def _setup_elasticsearch_7_10_fgac(self) -> None:
        """Provision an Elasticsearch 7.10 domain secured by fine-grained access control.

        The resource policy is intentionally open (AnyPrincipal); authorization
        is delegated to FGAC's internal master user. Exports the endpoint as a
        CloudFormation output.
        """
        name = "wrangler-es-7-10-fgac"
        validate_domain_name(name)
        arn = f"arn:aws:es:{self.region}:{self.account}:domain/{name}"

        # Open access policy — FGAC (master user/password) does the real gating.
        open_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["es:*"],
            principals=[iam.AnyPrincipal()],  # FGACs
            resources=[f"{arn}/*"],
        )

        domain = opensearch.Domain(
            self,
            name,
            domain_name=name,
            version=opensearch.EngineVersion.ELASTICSEARCH_7_10,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            access_policies=[open_policy],
            fine_grained_access_control=opensearch.AdvancedSecurityOptions(
                master_user_name=self.username,
                master_user_password=self.password_secret,
            ),
            # HTTPS, node-to-node encryption and encryption-at-rest are all
            # required when fine-grained access control is enabled.
            node_to_node_encryption=True,
            encryption_at_rest=opensearch.EncryptionAtRestOptions(
                enabled=True, kms_key=self.key),
            enforce_https=True,
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self, f"DomainEndpoint-{name}", value=domain.domain_endpoint)
Example No. 2
    def _setup_opensearch_1_0(self) -> None:
        """Provision a single-node OpenSearch 1.0 domain scoped to the account root.

        Exports the domain endpoint as a CloudFormation output.
        """
        name = "wrangler-os-1-0"
        validate_domain_name(name)
        arn = f"arn:aws:es:{self.region}:{self.account}:domain/{name}"

        # Only principals in this account (root) may call the ES APIs.
        account_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["es:*"],
            principals=[iam.AccountRootPrincipal()],
            resources=[f"{arn}/*"],
        )

        domain = opensearch.Domain(
            self,
            name,
            domain_name=name,
            version=opensearch.EngineVersion.OPENSEARCH_1_0,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            access_policies=[account_policy],
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self, f"DomainEndpoint-{name}", value=domain.domain_endpoint)
Example No. 3
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        """Create a VPC, a public SSH bastion host, and a fine-grained-access-control
        Amazon OpenSearch domain placed in the VPC's private subnets.

        Outputs: bastion instance id, domain endpoint, Dashboards URL, and the
        Secrets Manager secret holding the master user credentials.
        """
        super().__init__(scope, construct_id, **kwargs)

        OPENSEARCH_DOMAIN_NAME = cdk.CfnParameter(
            self,
            'OpenSearchDomainName',
            type='String',
            description='Amazon OpenSearch Service domain name',
            # OpenSearch domain names must be lowercase, so sample the random
            # default suffix from lowercase letters only (ascii_letters would
            # allow uppercase and make the default an invalid domain name).
            default='opensearch-{}'.format(''.join(
                random.sample(string.ascii_lowercase, k=5))),
            # Raw string so '\-' reaches the CloudFormation regex verbatim
            # instead of being an invalid Python escape sequence.
            allowed_pattern=r'[a-z]+[A-Za-z0-9\-]+')

        EC2_KEY_PAIR_NAME = cdk.CfnParameter(
            self,
            'EC2KeyPairName',
            type='String',
            description='Amazon EC2 Instance KeyPair name')

        #XXX: For creating Amazon MWAA in an existing VPC,
        # uncomment the code below,
        # comment out the vpc = aws_ec2.Vpc(..) code,
        # then pass -c vpc_name=your-existing-vpc to the cdk command,
        # for example,
        # cdk -c vpc_name=your-existing-vpc synth
        #
        # vpc_name = self.node.try_get_context('vpc_name')
        # vpc = aws_ec2.Vpc.from_lookup(self, 'ExistingVPC',
        #   is_default=True,
        #   vpc_name=vpc_name
        # )

        vpc = aws_ec2.Vpc(
            self,
            "OpenSearchVPC",
            max_azs=3,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceClass.html
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceSize.html#aws_cdk.aws_ec2.InstanceSize
        ec2_instance_type = aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM)

        sg_bastion_host = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for a bastion host',
            security_group_name='bastion-host-sg')
        cdk.Tags.of(sg_bastion_host).add('Name', 'bastion-host-sg')

        #TODO: SHOULD restrict the IP range allowed for SSH access
        sg_bastion_host.add_ingress_rule(peer=aws_ec2.Peer.ipv4("0.0.0.0/0"),
                                         connection=aws_ec2.Port.tcp(22),
                                         description='SSH access')

        bastion_host = aws_ec2.Instance(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=ec2_instance_type,
            machine_image=aws_ec2.MachineImage.latest_amazon_linux(),
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=sg_bastion_host,
            key_name=EC2_KEY_PAIR_NAME.value_as_string)

        sg_use_opensearch = aws_ec2.SecurityGroup(
            self,
            "OpenSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an opensearch client',
            security_group_name='use-opensearch-cluster-sg')
        cdk.Tags.of(sg_use_opensearch).add('Name', 'use-opensearch-cluster-sg')

        sg_opensearch_cluster = aws_ec2.SecurityGroup(
            self,
            "OpenSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an opensearch cluster',
            security_group_name='opensearch-cluster-sg')
        cdk.Tags.of(sg_opensearch_cluster).add('Name', 'opensearch-cluster-sg')

        # Intra-cluster traffic (all TCP between cluster nodes).
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_opensearch_cluster,
            connection=aws_ec2.Port.all_tcp(),
            description='opensearch-cluster-sg')

        # Client access: HTTPS plus the 9200-9300 transport/REST range.
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_use_opensearch,
            connection=aws_ec2.Port.tcp(443),
            description='use-opensearch-cluster-sg')
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_use_opensearch,
            connection=aws_ec2.Port.tcp_range(9200, 9300),
            description='use-opensearch-cluster-sg')

        # Bastion access: same ports as clients.
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_bastion_host,
            connection=aws_ec2.Port.tcp(443),
            description='bastion-host-sg')
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_bastion_host,
            connection=aws_ec2.Port.tcp_range(9200, 9300),
            description='bastion-host-sg')

        master_user_secret = aws_secretsmanager.Secret(
            self,
            "OpenSearchMasterUserSecret",
            generate_secret_string=aws_secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                generate_string_key="password",
                # Master password must be at least 8 characters long and contain at least one uppercase letter,
                # one lowercase letter, one number, and one special character.
                password_length=8))

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        # You should camelCase the property names instead of PascalCase
        opensearch_domain = aws_opensearchservice.Domain(
            self,
            "OpenSearch",
            domain_name=OPENSEARCH_DOMAIN_NAME.value_as_string,
            version=aws_opensearchservice.EngineVersion.OPENSEARCH_1_0,
            capacity={
                "master_nodes": 3,
                "master_node_instance_type": "r6g.large.search",
                "data_nodes": 3,
                "data_node_instance_type": "r6g.large.search"
            },
            ebs={
                "volume_size": 10,
                "volume_type": aws_ec2.EbsDeviceVolumeType.GP2
            },
            #XXX: az_count must be equal to vpc subnets count.
            zone_awareness={"availability_zone_count": 3},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            },
            fine_grained_access_control=aws_opensearchservice.
            AdvancedSecurityOptions(
                master_user_name=master_user_secret.secret_value_from_json(
                    "username").to_string(),
                master_user_password=master_user_secret.secret_value_from_json(
                    "password")),
            # Enforce HTTPS is required when fine-grained access control is enabled.
            enforce_https=True,
            # Node-to-node encryption is required when fine-grained access control is enabled
            node_to_node_encryption=True,
            # Encryption-at-rest is required when fine-grained access control is enabled.
            encryption_at_rest={"enabled": True},
            use_unsigned_basic_auth=True,
            security_groups=[sg_opensearch_cluster],
            automated_snapshot_start_hour=17,  # 2 AM (GMT+9)
            vpc=vpc,
            vpc_subnets=[
                aws_ec2.SubnetSelection(
                    one_per_az=True,
                    subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT)
            ],
            removal_policy=cdk.RemovalPolicy.
            DESTROY  # default: cdk.RemovalPolicy.RETAIN
        )
        cdk.Tags.of(opensearch_domain).add(
            'Name', f'{OPENSEARCH_DOMAIN_NAME.value_as_string}')

        cdk.CfnOutput(self,
                      'BastionHostId',
                      value=bastion_host.instance_id,
                      export_name='BastionHostId')
        cdk.CfnOutput(self,
                      'OpenSearchDomainEndpoint',
                      value=opensearch_domain.domain_endpoint,
                      export_name='OpenSearchDomainEndpoint')
        cdk.CfnOutput(
            self,
            'OpenSearchDashboardsURL',
            value=f"{opensearch_domain.domain_endpoint}/_dashboards/",
            export_name='OpenSearchDashboardsURL')
        cdk.CfnOutput(self,
                      'MasterUserSecretId',
                      value=master_user_secret.secret_name,
                      export_name='MasterUserSecretId')
Example No. 4
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        """Create an EKK-style pipeline: VPC + bastion host + OpenSearch domain,
        with a Kinesis Data Firehose delivery stream (DirectPut) that indexes
        records into the domain and backs failed documents up to S3.

        Outputs: bastion id, domain endpoint, Dashboards URL, master-user
        secret id, S3 backup bucket name, and the Firehose role ARN.
        """
        super().__init__(scope, construct_id, **kwargs)

        OPENSEARCH_DOMAIN_NAME = cdk.CfnParameter(
            self,
            'OpenSearchDomainName',
            type='String',
            description='Amazon OpenSearch Service domain name',
            # OpenSearch domain names must be lowercase, so sample the random
            # default suffix from lowercase letters only (ascii_letters would
            # allow uppercase and make the default an invalid domain name).
            default='opensearch-{}'.format(''.join(
                random.sample(string.ascii_lowercase, k=5))),
            # Raw string so '\-' reaches the CloudFormation regex verbatim
            # instead of being an invalid Python escape sequence.
            allowed_pattern=r'[a-z]+[A-Za-z0-9\-]+')

        OPENSEARCH_INDEX_NAME = cdk.CfnParameter(
            self,
            'SearchIndexName',
            type='String',
            description='Amazon OpenSearch Service index name')

        EC2_KEY_PAIR_NAME = cdk.CfnParameter(
            self,
            'EC2KeyPairName',
            type='String',
            description='Amazon EC2 Instance KeyPair name')

        # vpc_name = self.node.try_get_context("vpc_name")
        # vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
        #   is_default=True,
        #   vpc_name=vpc_name)
        #
        vpc = aws_ec2.Vpc(
            self,
            "EKKStackVPC",
            max_azs=3,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceClass.html
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/InstanceSize.html#aws_cdk.aws_ec2.InstanceSize
        ec2_instance_type = aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM)

        sg_bastion_host = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for a bastion host',
            security_group_name='bastion-host-sg')
        cdk.Tags.of(sg_bastion_host).add('Name', 'bastion-host-sg')

        #TODO: SHOULD restrict the IP range allowed for SSH access
        sg_bastion_host.add_ingress_rule(peer=aws_ec2.Peer.ipv4("0.0.0.0/0"),
                                         connection=aws_ec2.Port.tcp(22),
                                         description='SSH access')

        bastion_host = aws_ec2.Instance(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=ec2_instance_type,
            machine_image=aws_ec2.MachineImage.latest_amazon_linux(),
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=sg_bastion_host,
            key_name=EC2_KEY_PAIR_NAME.value_as_string)

        sg_use_opensearch = aws_ec2.SecurityGroup(
            self,
            "OpenSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an opensearch client',
            security_group_name='use-opensearch-cluster-sg')
        cdk.Tags.of(sg_use_opensearch).add('Name', 'use-opensearch-cluster-sg')

        sg_opensearch_cluster = aws_ec2.SecurityGroup(
            self,
            "OpenSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for an opensearch cluster',
            security_group_name='opensearch-cluster-sg')
        cdk.Tags.of(sg_opensearch_cluster).add('Name', 'opensearch-cluster-sg')

        # Intra-cluster traffic (all TCP between cluster nodes).
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_opensearch_cluster,
            connection=aws_ec2.Port.all_tcp(),
            description='opensearch-cluster-sg')

        # Client access: HTTPS plus the 9200-9300 transport/REST range.
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_use_opensearch,
            connection=aws_ec2.Port.tcp(443),
            description='use-opensearch-cluster-sg')
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_use_opensearch,
            connection=aws_ec2.Port.tcp_range(9200, 9300),
            description='use-opensearch-cluster-sg')

        # Bastion access: same ports as clients.
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_bastion_host,
            connection=aws_ec2.Port.tcp(443),
            description='bastion-host-sg')
        sg_opensearch_cluster.add_ingress_rule(
            peer=sg_bastion_host,
            connection=aws_ec2.Port.tcp_range(9200, 9300),
            description='bastion-host-sg')

        master_user_secret = aws_secretsmanager.Secret(
            self,
            "OpenSearchMasterUserSecret",
            generate_secret_string=aws_secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                generate_string_key="password",
                # Master password must be at least 8 characters long and contain at least one uppercase letter,
                # one lowercase letter, one number, and one special character.
                password_length=8))

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        # You should camelCase the property names instead of PascalCase
        opensearch_domain = aws_opensearchservice.Domain(
            self,
            "OpenSearch",
            domain_name=OPENSEARCH_DOMAIN_NAME.value_as_string,
            version=aws_opensearchservice.EngineVersion.OPENSEARCH_1_0,
            #XXX: You cannot use graviton instances with non-graviton instances.
            # Use graviton instances as data nodes or use non-graviton instances as master nodes.
            capacity={
                "master_nodes": 3,
                "master_node_instance_type": "r6g.large.search",
                "data_nodes": 3,
                "data_node_instance_type": "r6g.large.search"
            },
            ebs={
                "volume_size": 10,
                "volume_type": aws_ec2.EbsDeviceVolumeType.GP2
            },
            #XXX: az_count must be equal to vpc subnets count.
            zone_awareness={"availability_zone_count": 3},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            },
            fine_grained_access_control=aws_opensearchservice.
            AdvancedSecurityOptions(
                master_user_name=master_user_secret.secret_value_from_json(
                    "username").to_string(),
                master_user_password=master_user_secret.secret_value_from_json(
                    "password")),
            # Enforce HTTPS is required when fine-grained access control is enabled.
            enforce_https=True,
            # Node-to-node encryption is required when fine-grained access control is enabled
            node_to_node_encryption=True,
            # Encryption-at-rest is required when fine-grained access control is enabled.
            encryption_at_rest={"enabled": True},
            use_unsigned_basic_auth=True,
            security_groups=[sg_opensearch_cluster],
            automated_snapshot_start_hour=17,  # 2 AM (GMT+9)
            vpc=vpc,
            vpc_subnets=[
                aws_ec2.SubnetSelection(
                    one_per_az=True,
                    subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT)
            ],
            removal_policy=cdk.RemovalPolicy.
            DESTROY  # default: cdk.RemovalPolicy.RETAIN
        )
        cdk.Tags.of(opensearch_domain).add(
            'Name', f'{OPENSEARCH_DOMAIN_NAME.value_as_string}')

        S3_BUCKET_SUFFIX = ''.join(
            random.sample((string.ascii_lowercase + string.digits), k=7))
        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            removal_policy=cdk.RemovalPolicy.
            DESTROY,  #XXX: Default: core.RemovalPolicy.RETAIN - The bucket will be orphaned
            bucket_name="opskk-stack-{region}-{suffix}".format(
                region=cdk.Aws.REGION, suffix=S3_BUCKET_SUFFIX))

        # Firehose role: S3 backup bucket access.
        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        # ENI management permissions needed for VPC delivery.
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=["*"],
                actions=[
                    "ec2:DescribeVpcs", "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface"
                ]))

        # Write access to the destination domain.
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    opensearch_domain.domain_arn,
                    "{}/*".format(opensearch_domain.domain_arn)
                ],
                actions=[
                    "es:DescribeElasticsearchDomain",
                    "es:DescribeElasticsearchDomains",
                    "es:DescribeElasticsearchDomainConfig", "es:ESHttpPost",
                    "es:ESHttpPut"
                ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: https://aws.amazon.com/premiumsupport/knowledge-center/kinesis-data-firehose-delivery-failure/
                resources=[
                    opensearch_domain.domain_arn,
                    f"{opensearch_domain.domain_arn}/_all/_settings",
                    f"{opensearch_domain.domain_arn}/_cluster/stats",
                    f"{opensearch_domain.domain_arn}/{OPENSEARCH_INDEX_NAME.value_as_string}*/_mapping/%FIREHOSE_POLICY_TEMPLATE_PLACEHOLDER%",
                    f"{opensearch_domain.domain_arn}/_nodes",
                    f"{opensearch_domain.domain_arn}/_nodes/stats",
                    f"{opensearch_domain.domain_arn}/_nodes/*/stats",
                    f"{opensearch_domain.domain_arn}/_stats",
                    f"{opensearch_domain.domain_arn}/{OPENSEARCH_INDEX_NAME.value_as_string}*/_stats"
                ],
                actions=["es:ESHttpGet"]))

        firehose_log_group_name = f"/aws/kinesisfirehose/{OPENSEARCH_INDEX_NAME.value_as_string}"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name}
                resources=[
                    self.format_arn(
                        service="logs",
                        resource="log-group",
                        resource_name="{}:log-stream:*".format(
                            firehose_log_group_name),
                        arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "KinesisFirehoseServiceRole",
            role_name=
            f"KinesisFirehoseServiceRole-{OPENSEARCH_INDEX_NAME.value_as_string}-{cdk.Aws.REGION}",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        opensearch_dest_vpc_config = aws_kinesisfirehose.CfnDeliveryStream.VpcConfigurationProperty(
            role_arn=firehose_role.role_arn,
            security_group_ids=[sg_use_opensearch.security_group_id],
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids)

        opensearch_dest_config = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchDestinationConfigurationProperty(
            index_name=OPENSEARCH_INDEX_NAME.value_as_string,
            role_arn=firehose_role.role_arn,
            s3_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Backup"
                },
                "compressionFormat":
                "UNCOMPRESSED",  # [GZIP | HADOOP_SNAPPY | Snappy | UNCOMPRESSED | ZIP]
                # Kinesis Data Firehose automatically appends the "YYYY/MM/dd/HH/" UTC prefix to delivered S3 files. You can also specify
                # an extra prefix in front of the time format and add "/" to the end to have it appear as a folder in the S3 console.
                "prefix": f"{OPENSEARCH_INDEX_NAME.value_as_string}/",
                "roleArn": firehose_role.role_arn
            },
            buffering_hints={
                "intervalInSeconds": 60,
                "sizeInMBs": 1
            },
            cloud_watch_logging_options={
                "enabled": True,
                "logGroupName": firehose_log_group_name,
                "logStreamName": "ElasticsearchDelivery"
            },
            domain_arn=opensearch_domain.domain_arn,
            index_rotation_period=
            "NoRotation",  # [NoRotation | OneDay | OneHour | OneMonth | OneWeek]
            retry_options={"durationInSeconds": 60},
            s3_backup_mode=
            "FailedDocumentsOnly",  # [AllDocuments | FailedDocumentsOnly]
            vpc_configuration=opensearch_dest_vpc_config)

        firehose_to_ops_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "KinesisFirehoseToES",
            delivery_stream_name=OPENSEARCH_INDEX_NAME.value_as_string,
            delivery_stream_type="DirectPut",
            elasticsearch_destination_configuration=opensearch_dest_config,
            tags=[{
                "key": "Name",
                "value": OPENSEARCH_INDEX_NAME.value_as_string
            }])

        cdk.CfnOutput(self,
                      'BastionHostId',
                      value=bastion_host.instance_id,
                      export_name='BastionHostId')
        cdk.CfnOutput(self,
                      'OpenSearchDomainEndpoint',
                      value=opensearch_domain.domain_endpoint,
                      export_name='OpenSearchDomainEndpoint')
        cdk.CfnOutput(
            self,
            'OpenSearchDashboardsURL',
            value=f"{opensearch_domain.domain_endpoint}/_dashboards/",
            export_name='OpenSearchDashboardsURL')
        cdk.CfnOutput(self,
                      'MasterUserSecretId',
                      value=master_user_secret.secret_name,
                      export_name='MasterUserSecretId')
        cdk.CfnOutput(self,
                      '{}_S3DestBucket'.format(self.stack_name),
                      value=s3_bucket.bucket_name,
                      export_name='S3DestBucket')
        cdk.CfnOutput(self,
                      'FirehoseRoleArn',
                      value=firehose_role.role_arn,
                      export_name='FirehoseRoleArn')
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ################################################################################
        # VPC
        vpc = ec2.Vpc(self, "Monitoring VPC", max_azs=3)

        ################################################################################
        # Amazon OpenSearch Service domain
        es_sec_grp = ec2.SecurityGroup(
            self,
            'OpenSearchSecGrpMonitoring',
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name='OpenSearchSecGrpMonitoring')
        es_sec_grp.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))
        es_sec_grp.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443))

        domain = opensearch.Domain(
            self,
            'opensearch-service-monitor',
            version=opensearch.EngineVersion.
            OPENSEARCH_1_0,  # Upgrade when CDK upgrades
            domain_name=DOMAIN_NAME,
            removal_policy=core.RemovalPolicy.DESTROY,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type=DOMAIN_DATA_NODE_INSTANCE_TYPE,
                data_nodes=DOMAIN_DATA_NODE_INSTANCE_COUNT,
                master_node_instance_type=DOMAIN_MASTER_NODE_INSTANCE_TYPE,
                master_nodes=DOMAIN_MASTER_NODE_INSTANCE_COUNT,
                warm_instance_type=DOMAIN_UW_NODE_INSTANCE_TYPE,
                warm_nodes=DOMAIN_UW_NODE_INSTANCE_COUNT),
            ebs=opensearch.EbsOptions(enabled=True,
                                      volume_size=DOMAIN_INSTANCE_VOLUME_SIZE,
                                      volume_type=ec2.EbsDeviceVolumeType.GP2),
            vpc=vpc,
            vpc_subnets=[ec2.SubnetType.PUBLIC],
            security_groups=[es_sec_grp],
            zone_awareness=opensearch.ZoneAwarenessConfig(
                enabled=True, availability_zone_count=DOMAIN_AZ_COUNT),
            enforce_https=True,
            node_to_node_encryption=True,
            encryption_at_rest={"enabled": True},
            use_unsigned_basic_auth=True,
            fine_grained_access_control={
                "master_user_name":
                DOMAIN_ADMIN_UNAME,
                "master_user_password":
                core.SecretValue.plain_text(DOMAIN_ADMIN_PW)
            })

        core.CfnOutput(
            self,
            "MasterUser",
            value=DOMAIN_ADMIN_UNAME,
            description="Master User Name for Amazon OpenSearch Service")

        core.CfnOutput(
            self,
            "MasterPW",
            value=DOMAIN_ADMIN_PW,
            description="Master User Password for Amazon OpenSearch Service")

        ################################################################################
        # Dynamo DB table for time stamp tracking
        table = ddb.Table(
            self,
            'opensearch-monitor-lambda-timestamp',
            table_name=TABLE_NAME,
            partition_key=ddb.Attribute(name="domain",
                                        type=ddb.AttributeType.STRING),
            sort_key=ddb.Attribute(name='region',
                                   type=ddb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        ################################################################################
        # Lambda monitoring function
        lambda_func = lambda_.Function(
            self,
            'CWMetricsToOpenSearch',
            function_name="CWMetricsToOpenSearch_monitoring",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('CWMetricsToOpenSearch'),
            handler='handler.handler',
            memory_size=1024,
            timeout=core.Duration.minutes(10),
            vpc=vpc)

        table.grant_read_data(lambda_func)
        table.grant_write_data(lambda_func)
        lambda_func.add_environment('TABLE', table.table_name)
        lambda_func.add_environment('DOMAIN_ENDPOINT',
                                    'https://' + domain.domain_endpoint)
        lambda_func.add_environment('DOMAIN_ADMIN_UNAME', DOMAIN_ADMIN_UNAME)
        lambda_func.add_environment('DOMAIN_ADMIN_PW', DOMAIN_ADMIN_PW)
        lambda_func.add_environment('REGIONS', REGIONS_TO_MONITOR)

        # When the domain is created here, restrict access
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(actions=['es:*'], resources=['*']))

        # The function needs to read CW events. Restrict
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(actions=['cloudwatch:*'], resources=['*']))

        lambda_schedule = events.Schedule.rate(
            core.Duration.seconds(LAMBDA_INTERVAL))
        event_lambda_target = targets.LambdaFunction(handler=lambda_func)
        events.Rule(self,
                    "Monitoring",
                    enabled=True,
                    schedule=lambda_schedule,
                    targets=[event_lambda_target])

        ################################################################################
        # Lambda that forwards CloudWatch Logs subscription events into the
        # OpenSearch domain.
        lambda_func_cw_logs = lambda_.Function(
            self,
            'CWLogsToOpenSearch',
            function_name="CWLogsToOpenSearch_monitoring",
            # NOTE(review): Node.js 12 is end-of-life and rejected for new
            # Lambda functions — upgrade the runtime (and handler if needed).
            runtime=lambda_.Runtime.NODEJS_12_X,
            code=lambda_.Code.asset('CWLogsToOpenSearch'),
            handler='index.handler',
            vpc=vpc)

        # Expose the OpenSearch domain endpoint to the function (no scheme
        # prefix here, unlike the metrics Lambda above — the Node handler
        # presumably adds it; verify).
        lambda_func_cw_logs.add_environment('DOMAIN_ENDPOINT',
                                            domain.domain_endpoint)

        # NOTE(review): broad 'es:*' / 'logs:*' on '*' — could be scoped to
        # the domain ARN and the subscribed log groups respectively.
        lambda_func_cw_logs.add_to_role_policy(
            iam.PolicyStatement(actions=['es:*'], resources=['*']))

        lambda_func_cw_logs.add_to_role_policy(
            iam.PolicyStatement(actions=['logs:*'], resources=['*']))

        # Allow CloudWatch Logs in every monitored region to invoke this
        # function (a resource policy's region cannot be a wildcard, so one
        # permission is added per region).
        # NOTE(review): this boto3 STS call runs at synth time and requires
        # live AWS credentials; core.Aws.ACCOUNT_ID or Stack.account would
        # resolve at deploy time instead — confirm before changing.
        account_id = boto3.client("sts").get_caller_identity()["Account"]
        for region in json.loads(REGIONS_TO_MONITOR):
            lambda_func_cw_logs.add_permission(
                id="lambda-cw-logs-permission-" + region,
                principal=iam.ServicePrincipal("logs.amazonaws.com"),
                action="lambda:InvokeFunction",
                source_arn="arn:aws:logs:" + region + ":" + account_id +
                ":*:*:*")

        ################################################################################
        # Jump host in a public subnet: terminates SSH tunnels and fronts the
        # OpenSearch Dashboards UI via the nginx reverse proxy configured below.
        sn_public = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)

        # Latest Amazon Linux 2 HVM AMI with GP storage.
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance role with the SSM managed policies so the host can be
        # reached via Session Manager as well as SSH.
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))

        instance = ec2.Instance(
            self,
            'instance',
            instance_type=ec2.InstanceType(EC2_INSTANCE_TYPE),
            vpc=vpc,
            machine_image=amzn_linux,
            vpc_subnets=sn_public,
            key_name=EC2_KEY_NAME,
            role=role,
        )
        # NOTE(review): SSH (22) and HTTPS (443) are open to 0.0.0.0/0 —
        # consider restricting to a known CIDR or relying on SSM only.
        instance.connections.allow_from_any_ipv4(ec2.Port.tcp(22), 'SSH')
        instance.connections.allow_from_any_ipv4(ec2.Port.tcp(443), 'HTTPS')

        # Unlike the Lambdas above, the jump host's OpenSearch access is
        # scoped to this domain's ARN.
        stmt = iam.PolicyStatement(actions=['es:*'],
                                   resources=[domain.domain_arn])
        instance.add_to_role_policy(stmt)

        # SNS topic + email subscription used by OpenSearch alerting
        # notifications, plus the role the OpenSearch service assumes to
        # publish to it.
        sns_topic = sns.Topic(self, "cdk_monitoring_topic")

        # The subscriber must confirm the subscription email before
        # notifications are delivered (see the stack output at the bottom).
        sns_topic.add_subscription(
            subscriptions.EmailSubscription(SNS_NOTIFICATION_EMAIL))

        # Publish permission scoped to exactly this topic.
        sns_policy_statement = iam.PolicyStatement(
            actions=["sns:publish"],
            resources=[sns_topic.topic_arn],
            effect=iam.Effect.ALLOW)
        sns_policy = iam.ManagedPolicy(self, "cdk_monitoring_policy")
        sns_policy.add_statements(sns_policy_statement)

        # Role assumable by the OpenSearch service (es.amazonaws.com); its ARN
        # is injected into create_alerts.sh below as SNS_ROLE_ARN.
        sns_role = iam.Role(
            self,
            "cdk_monitoring_sns_role",
            assumed_by=iam.ServicePrincipal("es.amazonaws.com"))
        sns_role.add_managed_policy(sns_policy)

        # Stage three local files as S3 assets and queue user-data commands
        # that download them onto the jump host at boot:
        #   - the saved-objects export for OpenSearch Dashboards,
        #   - the nginx reverse-proxy config,
        #   - the alert-creation shell script.
        # Each add_s3_download_command returns the on-instance path the file
        # lands at (a generated name under /tmp/assets; renamed below).
        dirname = os.path.dirname(__file__)
        dashboards_asset = Asset(
            self,
            "DashboardsAsset",
            path=os.path.join(dirname,
                              'export_opensearch_dashboards_V1_0.ndjson'))
        dashboards_asset.grant_read(instance.role)
        dashboards_asset_path = instance.user_data.add_s3_download_command(
            bucket=dashboards_asset.bucket,
            bucket_key=dashboards_asset.s3_object_key,
        )

        nginx_asset = Asset(self,
                            "NginxAsset",
                            path=os.path.join(dirname,
                                              'nginx_opensearch.conf'))
        nginx_asset.grant_read(instance.role)
        nginx_asset_path = instance.user_data.add_s3_download_command(
            bucket=nginx_asset.bucket,
            bucket_key=nginx_asset.s3_object_key,
        )

        alerting_asset = Asset(self,
                               "AlertingAsset",
                               path=os.path.join(dirname, 'create_alerts.sh'))
        alerting_asset.grant_read(instance.role)
        alerting_asset_path = instance.user_data.add_s3_download_command(
            bucket=alerting_asset.bucket,
            bucket_key=alerting_asset.s3_object_key,
        )

        # Bootstrap the jump host: install nginx, rename the downloaded assets
        # to their expected names, substitute the deploy-time placeholders
        # (domain endpoint, role/topic ARNs, admin credentials) into the
        # configs, then start the HTTPS proxy and create the alerts.
        #
        # FIX: the openssl command previously ended with an embedded "\n" and
        # a missing comma, so it was implicitly string-concatenated with the
        # following "cp" command into a single list element. The rendered
        # user-data script was coincidentally identical, but each command is
        # now its own argument as intended.
        instance.user_data.add_commands(
            "yum update -y",
            "yum install jq -y",
            "amazon-linux-extras install nginx1.12",
            "cd /tmp/assets",
            "mv {} export_opensearch_dashboards_V1_0.ndjson".format(
                dashboards_asset_path),
            "mv {} nginx_opensearch.conf".format(nginx_asset_path),
            "mv {} create_alerts.sh".format(alerting_asset_path),
            # Self-signed certificate for the nginx HTTPS front end.
            "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/nginx/cert.key -out /etc/nginx/cert.crt -subj /C=US/ST=./L=./O=./CN=.",
            "cp nginx_opensearch.conf /etc/nginx/conf.d/",
            # '=' is used as the sed delimiter below wherever the substituted
            # value (an ARN) contains '/'.
            "sed -i 's/DEFAULT_DOMAIN_NAME/" + DOMAIN_NAME +
            "/g' /tmp/assets/export_opensearch_dashboards_V1_0.ndjson",
            "sed -i 's/DOMAIN_ENDPOINT/" + domain.domain_endpoint +
            "/g' /etc/nginx/conf.d/nginx_opensearch.conf",
            "sed -i 's/DOMAIN_ENDPOINT/" + domain.domain_endpoint +
            "/g' /tmp/assets/create_alerts.sh",
            "sed -i 's=LAMBDA_CW_LOGS_ROLE_ARN=" +
            lambda_func_cw_logs.role.role_arn +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's=SNS_ROLE_ARN=" + sns_role.role_arn +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's/SNS_TOPIC_ARN/" + sns_topic.topic_arn +
            "/g' /tmp/assets/create_alerts.sh",
            "sed -i 's=DOMAIN_ADMIN_UNAME=" + DOMAIN_ADMIN_UNAME +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's=DOMAIN_ADMIN_PW=" + DOMAIN_ADMIN_PW +
            "=g' /tmp/assets/create_alerts.sh",
            "systemctl restart nginx.service",
            "chmod 500 create_alerts.sh",
            "sleep 5",
            "bash --verbose create_alerts.sh",
        )

        # Stack outputs: where to reach Dashboards (through the jump host's
        # nginx proxy) and a reminder to confirm the SNS email subscription.
        core.CfnOutput(self,
                       "Dashboards URL (via Jump host)",
                       value="https://" + instance.instance_public_ip,
                       description="Dashboards URL via Jump host")

        core.CfnOutput(
            self,
            "SNS Subscription Alert Message",
            value=SNS_NOTIFICATION_EMAIL,
            # FIX: corrected typo in the user-facing description
            # ("receievedt at" -> "received at").
            description="Please confirm your SNS subscription received at")