Example #1
app = core.App()
stack = core.Stack(app,
                   'EmrLaunchExamplesEnvStack',
                   env=core.Environment(
                       account=os.environ['CDK_DEFAULT_ACCOUNT'],
                       region=os.environ['CDK_DEFAULT_REGION']))

vpc = ec2.Vpc(
    stack,
    'EmrLaunchVpc',
    cidr="10.104.197.128/25",
    max_azs=1,
    nat_gateways=1,
    subnet_configuration=[
        ec2.SubnetConfiguration(name="public",
                                cidr_mask=26,
                                subnet_type=ec2.SubnetType.PUBLIC),
        ec2.SubnetConfiguration(name="private",
                                cidr_mask=26,
                                subnet_type=ec2.SubnetType.PRIVATE)
        # ec2.SubnetConfiguration(name="private", cidr_mask=26, subnet_type=ec2.SubnetType.ISOLATED)
    ])

logs_bucket = s3.Bucket(
    stack,
    'EmrLaunchLogsBucket',
    bucket_name=f'{NAMING_PREFIX}-logs',
    block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
)
artifacts_bucket = s3.Bucket(
    stack,
    'EmrLaunchArtifactsBucket',
    # the original snippet was truncated here; the construct id and bucket
    # name are assumed to mirror the logs bucket above
    bucket_name=f'{NAMING_PREFIX}-artifacts',
    block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
)
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.current_dir = os.path.dirname(__file__)

        self.vpc = ec2.Vpc(
            self,
            "VPC",
            cidr="10.0.0.0/21",
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name="Database",
                    subnet_type=ec2.SubnetType.ISOLATED,
                ),
                ec2.SubnetConfiguration(cidr_mask=28,
                                        name="Private",
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(cidr_mask=28,
                                        name="Public",
                                        subnet_type=ec2.SubnetType.PUBLIC)
            ],
            nat_gateways=3)

        self.qs_security_group = ec2.SecurityGroup(
            self,
            "quicksight-sg",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="QuickSight security group")

        self.bastion = ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=self.vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))

        self.bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                     "Internet access SSH")

        self.vpc.add_interface_endpoint(
            "redshift_endpoint",
            service=ec2.InterfaceVpcEndpointAwsService("redshift"))

        self.vpc.add_interface_endpoint(
            "rds_endpoint", service=ec2.InterfaceVpcEndpointAwsService("rds"))

        self.redshift_secret = secrets.Secret(
            self,
            'redshift-admin',
            secret_name='redshift-admin',
            description=
            "Generated admin password for the Redshift cluster",
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key='password',
                password_length=32,
                exclude_characters='"@\\/',
                exclude_punctuation=True))

        self.rs_security_group = ec2.SecurityGroup(self,
                                                   "redshift-sg",
                                                   vpc=self.vpc,
                                                   allow_all_outbound=True,
                                                   description="Redshift SG")

        self.rs_security_group.add_ingress_rule(self.rs_security_group,
                                                ec2.Port.all_tcp(),
                                                'Redshift-basic')

        self.rs_security_group.add_ingress_rule(
            # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
            ec2.Peer.ipv4('52.23.63.224/27'),
            ec2.Port.tcp(5439),
            'QuickSight-IP')

        self.rs_security_group.add_ingress_rule(self.qs_security_group,
                                                ec2.Port.tcp(5439),
                                                'QuickSight-sg')

        # self.rs_security_group.add_egress_rule(
        #     self.rs_security_group,
        #     ec2.Port.all_tcp(),
        #     'Allow outbound for QuickSight'
        # )

        self.redshift_cluster = redshift.Cluster(
            self,
            "datasource-redshift",
            master_user=redshift.Login(
                master_username="******",
                master_password=self.redshift_secret.secret_value_from_json(
                    'password')),
            vpc=self.vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            security_groups=[self.rs_security_group])

        self.rds_secret = secrets.Secret(
            self,
            'rds-admin',
            secret_name='rds-admin',
            description=
            "Generated admin password for the RDS cluster",
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key='password',
                password_length=32,
                exclude_characters='"@\\/',
                exclude_punctuation=True))

        self.rds_cluster = rds.DatabaseCluster(
            self,
            "datasource-rds",
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_9),
            instance_props={
                "vpc_subnets": {
                    "subnet_type": ec2.SubnetType.ISOLATED
                },
                "vpc": self.vpc
            },
            credentials=rds.Credentials.from_secret(self.rds_secret))

        self.rds_cluster.connections.allow_default_port_from(
            self.bastion, "EC2 Bastion access Aurora")

        self.rds_cluster.connections.allow_default_port_from(
            self.qs_security_group, "QuickSight-sg")

        self.rds_cluster.connections.allow_default_port_from(
            # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
            ec2.Peer.ipv4('52.23.63.224/27'),
            "QuickSight-IP")

        self.qs_security_group.add_ingress_rule(self.rs_security_group,
                                                ec2.Port.all_tcp(), 'AllTCP')

        for rds_group in self.rds_cluster.connections.security_groups:
            self.qs_security_group.add_ingress_rule(rds_group,
                                                    ec2.Port.all_tcp(),
                                                    'AllTCP')

        # self.qs_security_group.add_egress_rule(
        #     self.rs_security_group,
        #     ec2.Port.all_tcp(),
        #     'AllTCP'
        # )

        core.CfnOutput(self, "vpcId", value=self.vpc.vpc_id)
        core.CfnOutput(self, "redshiftUsername", value="admin")
        core.CfnOutput(self, "redshiftPassword", value="redshift-admin")
        core.CfnOutput(self,
                       "redshiftClusterId",
                       value=self.redshift_cluster.cluster_name)
        core.CfnOutput(self,
                       "redshiftHost",
                       value=self.redshift_cluster.cluster_endpoint.hostname)
        core.CfnOutput(self, "redshiftDB", value="dev")
        core.CfnOutput(self, "rdsUsername", value="administrator")
        core.CfnOutput(self, "rdsPassword", value="rds-admin")
        core.CfnOutput(self,
                       "rdsClusterId",
                       value=self.rds_cluster.cluster_identifier)
        core.CfnOutput(self, "namespace", value="default")
        core.CfnOutput(self, "version", value="1")
        core.CfnOutput(self,
                       "quicksightSecurityGroupId",
                       value=self.qs_security_group.security_group_id)
Example #3
 def define_public_subnet(self):
     return ec2.SubnetConfiguration(cidr_mask=24,
                                    name="shared_public",
                                    subnet_type=ec2.SubnetType.PUBLIC)
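For context, a minimal sketch of how a helper like this is typically consumed; the surrounding stack class (NetworkStack) and construct ids are assumptions, not part of the original example:

from aws_cdk import core
from aws_cdk import aws_ec2 as ec2


class NetworkStack(core.Stack):  # hypothetical stack name
    def define_public_subnet(self):
        return ec2.SubnetConfiguration(cidr_mask=24,
                                       name="shared_public",
                                       subnet_type=ec2.SubnetType.PUBLIC)

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # The helper's return value slots into the Vpc's subnet_configuration list.
        self.vpc = ec2.Vpc(self,
                           "SharedVpc",
                           max_azs=2,
                           subnet_configuration=[self.define_public_subnet()])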
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = aws_ec2.Vpc(self,
                               "demo-stepfunctions",
                               cidr="10.100.0.0/16",
                               max_azs=2,
                               nat_gateways=0,
                               subnet_configuration=[
                                   aws_ec2.SubnetConfiguration(
                                       name='demo-stepfunctions',
                                       subnet_type=aws_ec2.SubnetType.ISOLATED,
                                       cidr_mask=24)
                               ])
        lambda_role = iam.Role(
            self,
            'demo-lambda-role',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))

        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaENIManagementAccess'))
        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))

        fn_submit = lambda_.Function(
            self,
            'demo-sfn-submit',
            function_name='demo-sfn-submit',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_submit'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        fn_job_1 = lambda_.Function(
            self,
            'demo-sfn-job1',
            function_name='demo-sfn-job1',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_job_1'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        fn_job_2 = lambda_.Function(
            self,
            'demo-sfn-job2',
            function_name='demo-sfn-job2',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_job_2'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=fn_submit,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        step_1_job = tasks.LambdaInvoke(
            self,
            "Job_1",
            lambda_function=fn_job_1,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.duration(
                              core.Duration.seconds(60)))

        step_2_job = tasks.LambdaInvoke(
            self,
            "Job_2",
            lambda_function=fn_job_2,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        job_succeed = sfn.Succeed(self,
                                  "Job Succeed",
                                  comment="AWS Batch Job Succeed")

        definition = submit_job.next(step_1_job).next(wait_x).next(
            step_2_job).next(job_succeed)

        sfn.StateMachine(self,
                         "StateMachine",
                         definition=definition,
                         timeout=core.Duration.minutes(5))
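The handler modules under ./craftaws are not included in this example; a minimal sketch of what such a handler could look like follows (the file path and return shape are assumptions). Because each LambdaInvoke task sets output_path="$.Payload", whatever the handler returns becomes the input passed to the next state:

# craftaws/func_submit/handler.py  (hypothetical contents)
def do(event, context):
    # The returned dict is wrapped by the Lambda service into the task result's
    # Payload field; output_path="$.Payload" forwards it to the next state.
    return {"status": "SUBMITTED", "input": event}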
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        r = requests.get("http://ifconfig.me")
        myip = r.text + "/32"

        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      max_azs=3,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        sg = ec2.SecurityGroup(self,
                               "greg-sg",
                               vpc=vpc,
                               allow_all_outbound=True)
        # sg.add_ingress_rule(ec2.Peer.ipv4(myip), ec2.Port.tcp(22))

        instance = ec2.Instance(self,
                                "greg-ec2",
                                instance_type=ec2.InstanceType('c5.xlarge'),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                key_name='gregkey',
                                security_group=sg)

        core.CfnOutput(self,
                       "output_ssh_bastion_public_ip",
                       value=instance.instance_public_ip)
        core.CfnOutput(self,
                       "output_ssh_bastion_private_ip",
                       value=instance.instance_private_ip)

        # es domain helpful links
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-elasticsearchclusterconfig
        # https://github.com/aws/aws-cdk/issues/2873
        # https://sourcecodequery.com/example-method/core.Tag
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-elasticsearchclusterconfig

        es_cluster_config = {
            "instanceCount": 3,
            "instanceType": "m4.xlarge.elasticsearch",
            "dedicatedMasterEnabled": True,
            "dedicatedMasterCount": 3
        }
        es_access_policy = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "AWS": "*"
                },
                "Action": ["es:*"],
                "Condition": {
                    "IpAddress": {
                        "aws:SourceIp": [
                            myip, instance.instance_public_ip,
                            instance.instance_private_ip
                        ]
                    }
                },
            }]
        }
        es_storage = {
            "ebsEnabled": True,
            "volumeSize": 50,
            "volumeType": "gp2"
        }

        es_domain = elasticsearch.CfnDomain(
            self,
            "greg-es",
            elasticsearch_version="7.4",
            elasticsearch_cluster_config=es_cluster_config,
            access_policies=es_access_policy,
            ebs_options=es_storage,
        )

        core.CfnOutput(self,
                       "output_es_domain_endpoint",
                       value=es_domain.attr_domain_endpoint)
Example #6
 def subnet_configuration(self) -> List[ec2.SubnetConfiguration]:
   return [
     ec2.SubnetConfiguration(name='Public', subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24),
     ec2.SubnetConfiguration(name='Hadoop', subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=20),
   ]
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Let's create a couple of instances to test:
        vpc = _ec2.Vpc(self,
                       "abacVPC",
                       cidr="10.13.0.0/21",
                       max_azs=2,
                       nat_gateways=0,
                       subnet_configuration=[
                           _ec2.SubnetConfiguration(
                               name="pubSubnet",
                               cidr_mask=24,
                               subnet_type=_ec2.SubnetType.PUBLIC)
                       ])
        core.Tag.add(vpc,
                     key="ServiceProvider",
                     value="KonStone",
                     include_resource_types=[])

        weak_sg = _ec2.SecurityGroup(
            self,
            "web_sec_grp",
            vpc=vpc,
            description="Allow internet access from the world",
            allow_all_outbound=True)
        # vpc_cidr_block
        # weak_sg.add_ingress_rule(_ec2.Peer.any_ipv4(),
        weak_sg.add_ingress_rule(_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                 _ec2.Port.tcp(22),
                                 "Allow SSH access from the VPC Only.")

        # We are using the latest AMAZON LINUX AMI
        # Benefit of having SSM Agent pre-installed
        ami_id = _ec2.AmazonLinuxImage(generation=_ec2.AmazonLinuxGeneration.
                                       AMAZON_LINUX_2).get_image(self).image_id

        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_iam/Role.html
        instance_profile_role = _iam.Role(
            self,
            'ec2ssmroleid',
            assumed_by=_iam.ServicePrincipal('ec2.amazonaws.com'),
            role_name="instance_profile_role")

        instance_profile_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        instance_profile_role_additional_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:logs:*:*:*",
            ],
            actions=["logs:Create*", "logs:PutLogEvents"])
        instance_profile_role_additional_perms.sid = "PutBucketPolicy"
        instance_profile_role.add_to_policy(
            instance_profile_role_additional_perms)

        inst_profile_01 = _iam.CfnInstanceProfile(
            self,
            "instProfile01Id",
            roles=[instance_profile_role.role_name],
        )

        # Let us bootstrap the server with the required agents
        bootstrap_data = b""
        try:
            with open("./bootstrap_scripts/install_agents.sh",
                      mode='rb') as file:
                bootstrap_data = file.read()
        except OSError:
            print('Failed to get UserData script')

        install_agents = _ec2.UserData.for_linux()
        install_agents.add_commands(str(bootstrap_data, 'utf-8'))

        # The EC2 Instance to monitor for failed SSH Logins
        ssh_monitored_inst_01 = _ec2.CfnInstance(
            self,
            "sshMonitoredInstance01",
            image_id=ami_id,
            instance_type="t2.micro",
            monitoring=False,
            tags=[{
                "key": "ServiceProvider",
                "value": "KonStone"
            }],
            iam_instance_profile=inst_profile_01.ref,
            network_interfaces=[{
                "deviceIndex": "0",
                "associatePublicIpAddress": True,
                "subnetId": vpc.public_subnets[0].subnet_id,
                "groupSet": [weak_sg.security_group_id]
            }],  #https: //github.com/aws/aws-cdk/issues/3419
            user_data=core.Fn.base64(install_agents.render()),
        )
        """
        linux_ami = _ec2.GenericLinuxImage({ "cn-northwest-1": "ami-0f62e91915e16cfc2","eu-west-1": "ami-12345678"})
        ssh_monitored_inst_01_02 = _ec2.Instance(self,
            "monitoredInstance02",
            instance_type=_ec2.InstanceType(instance_type_identifier="t2.micro"),
            instance_name="monitoredInstance02",
            machine_image=linux_ami,
            vpc=vpc,
            security_group=[weak_sg.security_group_id],
            # vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC)
            vpc_subnets=vpc.public_subnets[0].subnet_id,
            # user_data=_ec2.UserData.custom(t_user_data)
            )
        """

        # The log group name to store logs
        info_sec_ops_log_group = _logs.LogGroup(
            self,
            "infoSecOpsLogGroupId",
            log_group_name=(f"/Mystique/InfoSec/Automation/"
                            f"{ssh_monitored_inst_01.ref}"),
            retention=_logs.RetentionDays.ONE_WEEK)

        # Defines an AWS Lambda resource

        with open("lambda_src/quarantine_ec2_instance.py",
                  encoding="utf8") as fp:
            quarantine_ec2_instance_fn_handler_code = fp.read()

        quarantine_ec2_instance_fn = _lambda.Function(
            self,
            id='quarantineEc2InstanceFnId',
            function_name="quarantine_ec2_instance",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(quarantine_ec2_instance_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(5))
        quarantine_ec2_instance_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "*",
            ],
            actions=[
                "ec2:RevokeSecurityGroupIngress",
                "ec2:DescribeSecurityGroupReferences",
                "ec2:RevokeSecurityGroupEgress",
                "ec2:ApplySecurityGroupsToClientVpnTargetNetwork",
                "ec2:DescribeSecurityGroups", "ec2:CreateSecurityGroup",
                "ec2:DescribeInstances", "ec2:CreateTags", "ec2:StopInstances",
                "ec2:CreateVolume", "ec2:CreateSnapshots",
                "ec2:CreateSnapshot", "ec2:DescribeSnapshots",
                "ec2:ModifyInstanceAttribute"
            ])
        quarantine_ec2_instance_fn_perms.sid = "AllowLambdaToQuarantineEC2"
        quarantine_ec2_instance_fn.add_to_role_policy(
            quarantine_ec2_instance_fn_perms)

        info_sec_ops_topic = _sns.Topic(self,
                                        "infoSecOpsTopicId",
                                        display_name="InfoSecTopic",
                                        topic_name="InfoSecOpsTopic")

        # Ref: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        quarantine_ec2_instance_task = _sfn.Task(
            self,
            "Quarantine EC2 Instance",
            task=_tasks.InvokeFunction(quarantine_ec2_instance_fn),
            result_path="$")

        notify_secops_task = _sfn.Task(
            self,
            "Notify InfoSecOps",
            task=_tasks.PublishToTopic(
                info_sec_ops_topic,
                integration_pattern=_sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                message=_sfn.TaskInput.from_data_at("$.message"),
                subject="SSH Error Response Notification"))

        ssh_error_response_failure = _sfn.Fail(
            self,
            "SSH Error Response Actions Failed",
            cause="All Response Actions were NOT completed",
            error="Check Logs")

        ssh_error_response_success = _sfn.Succeed(
            self,
            "SSH Error Response Actions Succeeded",
            comment="All Response Action Completed Successfully",
        )

        ssh_error_response_sfn_definition = quarantine_ec2_instance_task\
            .next(notify_secops_task\
                .next(_sfn.Choice(self, "SSH Errors Response Complete?")\
                    .when(_sfn.Condition.number_equals("$.SdkHttpMetadata.HttpStatusCode", 200),ssh_error_response_success)\
                    .when(_sfn.Condition.not_(
                        _sfn.Condition.number_equals("$.SdkHttpMetadata.HttpStatusCode", 200)), ssh_error_response_failure)\
                    .otherwise(ssh_error_response_failure)
                    )
            )

        ssh_error_response_statemachine = _sfn.StateMachine(
            self,
            "stateMachineId",
            definition=ssh_error_response_sfn_definition,
            timeout=core.Duration.minutes(5))

        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        # LAMBDA TO TRIGGER STATE MACHINE - since state cannot be invoked by SNS
        with open("lambda_src/trigger_state_machine.py",
                  encoding="utf8") as fp:
            trigger_state_machine_fn_handler_code = fp.read()

        trigger_state_machine_fn = _lambda.Function(
            self,
            id='sshErrorResponseFnId',
            function_name="trigger_ssh_error_response_state_machine_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(trigger_state_machine_fn_handler_code),
            # code=_lambda.Code.asset("lambda_src/is_policy_permissive.py"),
            # code=_lambda.Code.asset('lambda_src'),
            # code=_lambda.InlineCode(code_body),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(5),
            environment={
                "STATE_MACHINE_ARN":
                f"{ssh_error_response_statemachine.state_machine_arn}",
            })

        trigger_state_machine_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                f"{ssh_error_response_statemachine.state_machine_arn}",
            ],
            actions=["states:StartExecution"])
        trigger_state_machine_fn_perms.sid = "PutBucketPolicy"
        trigger_state_machine_fn.add_to_role_policy(
            trigger_state_machine_fn_perms)
        """
        version = trigger_state_machine_fn.add_version(name=datetime.now().isoformat())
        trigger_state_machine_fn_alias = _lambda.Alias(self, 
            'lmdaAliasId',
            alias_name='MystiqueTestAlias',
            version=version
            )
        """

        # Lets add permission to SNS to trigger our lambda function
        trigger_lambda_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                trigger_state_machine_fn.function_arn,
            ],
            actions=[
                "lambda:InvokeFunction",
            ])
        trigger_lambda_perms.sid = "TriggerLambdaFunction"
        # info_sec_ops_topic.add_to_resource_policy( trigger_lambda_perms )

        # Subscribe InfoSecOps Email to topic
        info_sec_ops_topic.add_subscription(
            _subs.EmailSubscription(global_args.INFO_SEC_OPS_EMAIL))
        # info_sec_ops_topic.add_subscription(_subs.LambdaSubscription(trigger_state_machine_fn))

        trigger_state_machine_fn_alarm = trigger_state_machine_fn.metric_all_errors(
        ).create_alarm(
            self,
            "fn-error-alarm",
            threshold=5,
            alarm_name="trigger_state_machine_fn_error_alarm",
            evaluation_periods=5,
            period=core.Duration.minutes(1),
        )

        subscribe_trigger_state_machine_fn_to_logs = _logs.SubscriptionFilter(
            self,
            "sshErrorLogSubscriptionId",
            log_group=info_sec_ops_log_group,
            destination=_logs_destination.LambdaDestination(
                trigger_state_machine_fn),
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "status",
                "...").where_string("status", "=", "Invalid"),
        )

        # https://pypi.org/project/aws-cdk.aws-logs/
        # We are creating three filters:
        # tooManySshDisconnects, invalidSshUser and invalidSshKey
        # When a user tries to SSH with an invalid username, a line like the following is logged in the SSH log file:
        # Apr 20 02:39:35 ip-172-31-63-56 sshd[17136]: Received disconnect from xxx.xxx.xxx.xxx: 11:  [preauth]
        too_many_ssh_disconnects_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}",
            metric_name="tooManySshDisconnects")
        too_many_ssh_disconnects_filter = _logs.MetricFilter(
            self,
            "tooManySshDisconnectsFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=too_many_ssh_disconnects_metric.namespace,
            metric_name=too_many_ssh_disconnects_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "msg1", "msg2",
                "...").where_string("msg2", "=", "disconnect"),
            metric_value="1")

        invalid_ssh_user_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}",
            metric_name="invalidSshUser",
        )
        invalid_ssh_user_filter = _logs.MetricFilter(
            self,
            "invalidSshUserFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=invalid_ssh_user_metric.namespace,
            metric_name=invalid_ssh_user_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "status",
                "...").where_string("status", "=", "Invalid"),
            metric_value="1")

        invalid_ssh_key_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}", metric_name="invalidSshKey")

        invalid_ssh_key_filter = _logs.MetricFilter(
            self,
            "invalidSshKeyFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=invalid_ssh_key_metric.namespace,
            metric_name=invalid_ssh_key_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "msg1", "msg2",
                "...").where_string("msg1", "=", "Connection").where_string(
                    "msg2", "=", "closed"),
            metric_value="1")

        # Now let us create alarms
        # An alarm is raised when the metric is at least 5 (threshold) in 1 (datapoints_to_alarm) of the last 3 (evaluation_periods) one-minute periods:
        # Period=60Seconds, Eval=3, Threshold=5
        too_many_ssh_disconnects_alarm = _cloudwatch.Alarm(
            self,
            "tooManySshDisconnectsAlarmId",
            alarm_name="too_many_ssh_disconnects_alarm",
            alarm_description=
            "The number of disconnect requests is greater than 5, even 1 time in 3 minutes",
            metric=too_many_ssh_disconnects_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD)

        invalid_ssh_user_alarm = _cloudwatch.Alarm(
            self,
            "invalidSshUserAlarmId",
            alarm_name="too_many_invalid_ssh_users_alarm",
            alarm_description=
            "The number of invalid ssh users connecting is greater than 5, even 1 time in 3 minutes",
            metric=invalid_ssh_user_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_THRESHOLD)
        invalid_ssh_user_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(info_sec_ops_topic))

        invalid_ssh_key_alarm = _cloudwatch.Alarm(
            self,
            "invalidSshKeyAlarmId",
            alarm_name="too_many_invalid_ssh_key_alarm",
            alarm_description=
            "The number of invalid ssh keys connecting is greater than 5, even 1 time in 3 minutes",
            metric=invalid_ssh_key_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD)
        invalid_ssh_key_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(info_sec_ops_topic))

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(
            self,
            "SecuirtyAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output1_1 = core.Fn.get_att(
            logical_name_of_resource="sshMonitoredInstance01",
            attribute_name="PublicIp")
        output1 = core.CfnOutput(self,
                                 "MonitoredInstance",
                                 value=output1_1.to_string(),
                                 description="Web Server Public IP to attack")

        output2 = core.CfnOutput(
            self,
            "SSHAlarms",
            value=
            (f"https://console.aws.amazon.com/cloudwatch/home?region="
             f"{core.Aws.REGION}"
             f"#/configuration/"
             f"#alarmsV2:?search=ssh&alarmStateFilter=ALL&alarmTypeFilter=ALL"
             ),
            description="Check out the cloudwatch Alarms")

        output3 = core.CfnOutput(
            self,
            "SubscribeToNotificationTopic",
            value=(f"https://console.aws.amazon.com/sns/v3/home?"
                   f"{core.Aws.REGION}"
                   f"#/topic/"
                   f"{info_sec_ops_topic.topic_arn}"),
            description=
            "Add your email to subscription and confirm subscription")

        output_test_1 = core.CfnOutput(
            self,
            "ToGenInvalidKeyErrors",
            value=
            (f"for i in {{1..30}}; do ssh -i $RANDOM ec2-user@{output1_1.to_string()}; sleep 2; done &"
             ),
            description=
            "Generates random key names and connects to server 30 times over 60 seconds"
        )

        output_test_2 = core.CfnOutput(
            self,
            "ToGenInvalidUserErrors",
            value=
            (f"for i in {{1..30}}; do ssh ec2-user$RANDOM@{output1_1.to_string()}; sleep 2; done &"
             ),
            description=
            "Generates random user names and connects to server 30 times over 60 seconds"
        )
        """
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # s3
        s3_bucket_name = "{}-s3-{}".format(Constant.PROJECT_NAME,
                                           self._get_UUID(4))
        _s3.Bucket(
            self,
            id=s3_bucket_name,
            bucket_name=s3_bucket_name,
            removal_policy=core.RemovalPolicy.
            DESTROY,  #TODO:  destroy for test
            # removal_policy=core.RemovalPolicy.RETAIN
        )

        # step 1. VPC
        # To deploy into an existing VPC instead, use the line below and pass in the vpc_id
        # vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id='')
        vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,  # two AZs, one subnet per group in each AZ
            cidr="10.10.0.0/16",
            # this configuration creates 2 subnet groups in 2 AZs = 4 subnets (the DB group below is commented out)
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                # ec2.SubnetConfiguration(
                #     subnet_type=ec2.SubnetType.ISOLATED,
                #     name="DB",
                #     cidr_mask=24
                # )
            ],
            # nat_gateway_provider=ec2.NatProvider.gateway(),
            # nat_gateways=2,
        )

        # ES must be deployed into the private subnets
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

        # step 2. iam_instance_profile needed to access S3 and the ES cluster
        #  action -> statement -> policy -> role -> instance profile ->  attach ec2

        actions = [
            "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface",
            "ec2:DescribeNetworkInterfaces",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets",
            "ec2:DescribeVpcs", "s3:*"
        ]

        policyStatement = PolicyStatement(actions=actions, effect=Effect.ALLOW)
        policyStatement.add_all_resources()
        policyStatement.sid = "Stmt1480452973134"

        policy_name = "{}-ec2-es-policy".format(Constant.PROJECT_NAME)
        ec2_policy = Policy(self, policy_name, policy_name=policy_name)

        ec2_policy.add_statements(policyStatement)

        role_name = "{}-ec2-es-role".format(Constant.PROJECT_NAME)
        access_es_role = Role(
            self,
            role_name,
            role_name=role_name,
            assumed_by=ServicePrincipal('ec2.amazonaws.com.cn'))

        ec2_policy.attach_to_role(access_es_role)

        profile_name = "{}-ec2-es-profile".format(Constant.PROJECT_NAME)
        instance_profile = CfnInstanceProfile(
            self,
            profile_name,
            instance_profile_name=profile_name,
            roles=[access_es_role.role_name])

        # step 4. ES

        # In production, restrict the security group to accept only port 443 requests from inside the VPC
        sg_es_cluster_name = "{}-sg-es".format(Constant.PROJECT_NAME)
        sg_es_cluster = ec2.SecurityGroup(
            self,
            id=sg_es_cluster_name,
            vpc=vpc,
            security_group_name=sg_es_cluster_name)

        sg_es_cluster.add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                       connection=ec2.Port.tcp(443))

        es_name = Constant.PROJECT_NAME
        es_arn = self.format_arn(service="es",
                                 resource="domain",
                                 sep="/",
                                 resource_name=es_name)
        es = elasticsearch.CfnDomain(
            self,
            es_name,
            elasticsearch_version='7.1',
            domain_name=es_name,
            node_to_node_encryption_options={"enabled": False},
            vpc_options={
                "securityGroupIds": [sg_es_cluster.security_group_id
                                     ],  # 生产环境建议设置安全组, 只接收VPC内443端口请求
                # 如果开启多个节点, 需要配置多个子网, 目前测试只有一个ES 节点, 就只用到一个子网
                "subnetIds": selection.subnet_ids[:1]
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            elasticsearch_cluster_config={
                # Production should enable three dedicated master nodes
                # "dedicatedMasterCount": 3,
                # "dedicatedMasterEnabled": True,
                # "dedicatedMasterType": 'm4.large.elasticsearch',
                "instanceCount": 1,
                "instanceType": 'm4.large.elasticsearch',
                "zoneAwarenessEnabled": False
            })
        es.access_policies = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "AWS": "*"
                },
                "Action": "es:*",
                "Resource": "{}/*".format(es_arn)
            }]
        }

        # step 5.  SNS
        topic = sns.Topic(self, "topic")
        topic.add_subscription(subs.EmailSubscription(Constant.EMAIL_ADDRESS))

        # Add an SNS VPC endpoint so the Lambda can reach SNS from inside the VPC
        vpc.add_interface_endpoint(
            "SNSEndpoint", service=ec2.InterfaceVpcEndpointAwsService.SNS)

        # step 6. Lambda
        lambdaFn = lambda_.Function(self,
                                    "Singleton",
                                    code=lambda_.Code.asset('lambda'),
                                    handler='hello.handler',
                                    vpc=vpc,
                                    vpc_subnets=ec2.SubnetSelection(
                                        subnet_type=ec2.SubnetType.PRIVATE),
                                    timeout=core.Duration.seconds(300),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    environment={
                                        'SNS_TOPIC_ARN': topic.topic_arn,
                                        'ES_ENDPOINT': es.attr_domain_endpoint,
                                        'ES_INDEX_NAME': Constant.ES_INDEX_NAME
                                    })

        # step 7. Cloud watch event
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0/5',
                                          hour='*',
                                          month='*',
                                          week_day='*',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))

        # Grant the Lambda permission to publish to the SNS topic
        topic.grant_publish(lambdaFn)

        # Create ALB
        alb_name = "{}-alb".format(Constant.PROJECT_NAME)
        alb = elb.ApplicationLoadBalancer(self,
                                          alb_name,
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name=alb_name)
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)

        # Create Autoscaling Group with fixed 2*EC2 hosts
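        # Note: user_data_content, my_ami and ami_map are referenced below but are
        # defined elsewhere in the original module; they are not shown in this snippet.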

        user_data = user_data_content.format(es.attr_domain_endpoint,
                                             Constant.REGION_NAME,
                                             Constant.ES_LOG_PATH,
                                             Constant.ES_INDEX_NAME,
                                             s3_bucket_name)

        # step 3. Create the bastion host

        bastion_name = "{}-bastion".format(Constant.PROJECT_NAME)
        bastion = ec2.BastionHostLinux(
            self,
            bastion_name,
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name=bastion_name,
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"))
        bastion.instance.instance.add_property_override(
            "KeyName", Constant.EC2_KEY_NAME)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")  # In production, restrict source IPs with allow_from
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(8080),
                                                "Internet access HTTP")  # needed for testing
        # bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(443), "Internet access HTTPS")  # needed for testing

        bastion.instance.instance.iam_instance_profile = instance_profile.instance_profile_name  # Attach the instance profile (equivalent to a Role) to the EC2 instance
        bastion.instance.instance.image_id = ami_map.get(
            Constant.REGION_NAME)  # Specify the AMI ID

        # The bastion's user_data runs only once; to run it more than once, see https://amazonaws-china.com/premiumsupport/knowledge-center/execute-user-data-ec2/?nc1=h_ls
        bastion_user_data = "/home/ec2-user/start.sh {}  {} '{}' {} {}".format(
            es.attr_domain_endpoint, Constant.REGION_NAME,
            Constant.ES_LOG_PATH, Constant.ES_INDEX_NAME, s3_bucket_name)
        bastion.instance.add_user_data(
            "date >> /home/ec2-user/root.txt")  # 查看启动脚本是否执行
        bastion.instance.add_user_data(bastion_user_data)

        asg_name = "{}-asg".format(Constant.PROJECT_NAME)
        asg = autoscaling.AutoScalingGroup(
            self,
            asg_name,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),  # PUBLIC for debug
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"),
            machine_image=my_ami,
            key_name=Constant.EC2_KEY_NAME,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=1,
            min_capacity=1,
            max_capacity=1,
            role=access_es_role)

        asg.connections.allow_from(
            alb, ec2.Port.tcp(8080),
            "ALB access 80 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(ec2.Port.tcp(8080), "Internet access HTTP for test")  # for testing
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                            "Internet access SSH")  # for debug
        listener.add_targets("addTargetGroup", port=8080, targets=[asg])

        core.CfnOutput(self,
                       "s3_bucket_name",
                       value=s3_bucket_name,
                       description='S3 bucket: stores web logs')

        core.CfnOutput(self,
                       "ElasticSearchEndpointUrl",
                       value=es.attr_domain_endpoint,
                       description='Elastic Search Url')

        # Count the number of logs in Elasticsearch; can be run on the bastion to quickly check the log count.
        core.CfnOutput(self,
                       "CmdGetCountIndex",
                       value='curl https://{}/{}/_count'.format(
                           es.attr_domain_endpoint, Constant.ES_INDEX_NAME),
                       description='Count search result. ')

        # SSH command to log in to the bastion; can be copied and used directly
        core.CfnOutput(self,
                       "CmdSshToBastion",
                       value='ssh -i ~/{}.pem ec2-user@{}'.format(
                           Constant.EC2_KEY_NAME,
                           bastion.instance_public_dns_name),
                       description='cmd ssh to bastion')

        # Command to start the services on the bastion; after a reboot, run the command below to start the web service and ship logs to ES
        core.CfnOutput(
            self,
            "CmdSshBastionStartWeb",
            value='sudo {}'.format(bastion_user_data),
            description="Cmd to start web+logstash+filebeat service")

        # ALB access URL
        core.CfnOutput(self,
                       "UrlLoad_Balancer",
                       value='http://{}'.format(alb.load_balancer_dns_name),
                       description='ALB  url ')

        # Web URL of the bastion; for easier debugging the bastion uses the same AMI.
        core.CfnOutput(self,
                       "UrlBastion",
                       value='http://{}:8080'.format(
                           bastion.instance_public_dns_name),
                       description="Bastion server web url ")

        # The command in the output below opens an SSH tunnel through the bastion to Elasticsearch, so Kibana can be accessed locally.
        core.CfnOutput(
            self,
            "CmdSshProxyToKibana",
            value='ssh -i ~/{}.pem ec2-user@{}  -N -L 9200:{}:443'.format(
                Constant.EC2_KEY_NAME, bastion.instance_public_dns_name,
                es.attr_domain_endpoint),
            description="cmd: access kibana from bastion ssh. ")
        # After running the command above, open the link below in a browser
        core.CfnOutput(self,
                       "UrlKibana",
                       value='https://localhost:9200/_plugin/kibana/',
                       description="kibana url ")
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            scope=self,
            id="ECS-VPC",
            enable_dns_hostnames=True,
            enable_dns_support=True,
            cidr=constants.VPC_CIDR,
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    name='dmz',
                    subnet_type=ec2.SubnetType.PUBLIC
                ),
                ec2.SubnetConfiguration(
                    name='trust',
                    subnet_type=ec2.SubnetType.PRIVATE
                ),
                # ec2.SubnetConfiguration(
                #     name='isolated',
                #     subnet_type=ec2.SubnetType.ISOLATED,
                # ),
            ],
        )
        self.vpc = vpc

        cluster = ecs.Cluster(
            scope=self,
            id='ECS-CLUSTER',
            vpc=vpc,
            cluster_name=constants.ECS_CLUSTER_NAME
        )

        asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            key_name=constants.SSH_KEY_NAME,
            block_devices=[
                autoscaling.BlockDevice(
                    device_name="/dev/xvda",
                    volume=autoscaling.BlockDeviceVolume(ebs_device=autoscaling.EbsDeviceProps(
                        delete_on_termination=True,
                        volume_type=autoscaling.EbsDeviceVolumeType.GP2,
                        volume_size=100,
                    )),
                ),
                # autoscaling.BlockDevice(
                #     device_name="/dev/xvdb",
                #     volume=autoscaling.BlockDeviceVolume(ebs_device=autoscaling.EbsDeviceProps(
                #         delete_on_termination=True,
                #         volume_type=autoscaling.EbsDeviceVolumeType.GP2,
                #         volume_size=50,
                #     )),
                # ),
            ],
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType("t2.xlarge"),
            machine_image=ecs.EcsOptimizedAmi(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
        )

        with open(os.path.join("ecs", "userData.sh")) as f:
            user_data = f.read()

        asg.add_user_data(user_data)
        cluster.add_auto_scaling_group(asg)

        self.cluster = cluster
Example #10
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        # Set up an ALB and an AutoScalingGroup with fixed capacity (no scaling events),
        # a VPC with 2 public + 2 private subnets and 1 NAT instance spread over 2 AZs.
        # ASG instances are configured with an instance role for SSM and a web server installed through user data.

        # Configure the `natGatewayProvider` when defining a Vpc
        nat_gateway_provider = ec2.NatProvider.instance(
            instance_type=ec2.InstanceType("t3.small"))
        # Build a VPC plus Subnet Configuration, Routing tables and relevant routes
        vpc = ec2.Vpc(
            self,
            "VPC",
            cidr="10.0.0.0/16",
            max_azs=2,
            nat_gateway_provider=nat_gateway_provider,

            # The 'natGateways' parameter now controls the number of NAT instances
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    # 'subnetType' controls Internet access, as described above.
                    subnet_type=ec2.SubnetType.PUBLIC,
                    name="Public",
                    cidr_mask=24),
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PRIVATE,
                    name="Private",
                    cidr_mask=24,
                )
            ])

        # Create the load balancer in a VPC. 'internetFacing' is 'false'
        # by default, which creates an internal load balancer.
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=vpc,
                                           internet_facing=True)

        # Add a listener and open up the load balancer's security group
        # to the world.
        listener = lb.add_listener("Listener", port=80, open=True)

        # Create ServiceRole for EC2 instances; enable SSM usage
        EC2InstanceRole = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSSMManagedInstanceCore")
            ],
            description="This is a custom role for assuming SSM role")

        # Create an AutoScaling group; define InstanceRole and specify InstanceType
        auto_scaling_group = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            min_capacity=2,
            max_capacity=2,
            role=EC2InstanceRole,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
                edition=ec2.AmazonLinuxEdition.STANDARD,
                virtualization=ec2.AmazonLinuxVirt.HVM,
                storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE))

        # add the AutoScaling group as target to the listener.
        listener.add_targets("ApplicationFleet",
                             port=80,
                             targets=[auto_scaling_group])

        # read userdata script to install a simple WebServer
        # on the ASG-Instances
        with open('./userdata/webserver.sh', 'r') as myfile:
            userdata_script = myfile.read()

        auto_scaling_group.add_user_data(userdata_script)
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        public_subnets = ec2.SubnetConfiguration(
                name="Public",
                subnet_type=ec2.SubnetType.PUBLIC,
                cidr_mask=24)

        tier2_subnets = ec2.SubnetConfiguration(
                name="Tier2",
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
                cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(
            self,
            'AWS-Cookbook-VPC',
            max_azs=2,
            cidr='10.10.0.0/22',
            subnet_configuration=[public_subnets, tier2_subnets]
        )

        tier2_subnet_list = vpc.select_subnets(subnet_group_name="Tier2")

        # -------- Begin EC2 Helper ---------
        vpc.add_interface_endpoint(
            'VPCSSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCEC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCSSMMessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )

        iam_role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2RoleforSSM"))

        instance1 = ec2.Instance(
            self,
            "Instance1",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnets=[tier2_subnet_list.subnets[0]]
            )
        )

        instance2 = ec2.Instance(
            self,
            "Instance2",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnets=[tier2_subnet_list.subnets[0]]
            )
        )

        CfnOutput(
            self,
            'InstanceId1',
            value=instance1.instance_id
        )

        CfnOutput(
            self,
            'InstanceId2',
            value=instance2.instance_id
        )
        # -------- End EC2 Helper ---------

        # outputs

        CfnOutput(
            self,
            'VpcId',
            value=vpc.vpc_id
        )

        public_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC)

        CfnOutput(
            self,
            'VpcPublicSubnet1',
            value=public_subnets.subnets[0].subnet_id
        )

        CfnOutput(
            self,
            'PrivateRtId1',
            value=tier2_subnet_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'PrivateRtId2',
            value=tier2_subnet_list.subnets[1].route_table.route_table_id
        )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: Create IAM roles
        # sm_role_principal = _iam.IPrincipal('sagemaker.amazonaws.com')
        # sm_managed_policy = _iam.IManagedPolicy('AmazonSageMakerFullAccess')
        # sm_iam_role = _iam.Role(self, id='sagemaker_role', assumed_by=sm_role_principal)

        # TODO: create security groups from the public subnet to the private subnet and from the private subnet to the DB

        vpc_main = _ec2.Vpc(
            self,
            'vpc-main-prd',
            cidr='10.0.0.0/16',
            max_azs=2,
            subnet_configuration=[
                _ec2.SubnetConfiguration(name='Ingress',
                                         subnet_type=_ec2.SubnetType.PUBLIC,
                                         cidr_mask=24),
                _ec2.SubnetConfiguration(name='in-app',
                                         subnet_type=_ec2.SubnetType.PRIVATE,
                                         cidr_mask=24),
                _ec2.SubnetConfiguration(name='in-db',
                                         subnet_type=_ec2.SubnetType.ISOLATED,
                                         cidr_mask=28)
            ])

        # Security Group Basics
        ipv4_peer = _ec2.Peer.any_ipv4()
        https_port = _ec2.Port(protocol=_ec2.Protocol.TCP,
                               from_port=443,
                               to_port=443,
                               string_representation='HTTPS-PORT')

        # Security Groups
        sg_lambda_function1 = _ec2.SecurityGroup(
            self,
            id='lambda-function1',
            vpc=vpc_main,
            security_group_name='lambda-function1',
            description='SecurityGroup for LambdaFunction1',
            allow_all_outbound=True)

        sg_lambda_function1.add_ingress_rule(peer=ipv4_peer,
                                             connection=https_port)

        # Tags
        core.Tags.of(sg_lambda_function1).add('Name', 'lambda-function1-SG')

        # TODO: Requirements in order of priority
        # 1- S3 buckets:
        # sagemaker-dumps
        # datascience
        # 2- SageMaker Notebook Instance (connected to the dedicated S3 bucket)
        # 3- Lambda Function
        # 4- API Gateway
        # 5- Infrastructure for Airflow

        # TODO: lambda_s3_bucket
        lambda_code_bucket = _s3.Bucket(
            self,
            'lambdacode',
            bucket_name='lambda-code-data-2019',
            encryption=_s3.BucketEncryption.KMS_MANAGED,
            block_public_access=_s3.BlockPublicAccess(
                restrict_public_buckets=True))

        # TODO: lambda
        lambda_function_with_code = _lambda.Function(
            self,
            id='lambda_function1',
            code=_lambda.Code.asset('lambda'),
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='lambda-handler.handler',
            vpc=vpc_main,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PRIVATE),
            security_group=sg_lambda_function1)
        # TODO: API Gateway
        api_gtw_lambda = _apigw.LambdaRestApi(
            self, 'function1Api', handler=lambda_function_with_code)
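        # The 'lambda' asset directory and its handler module are not part of
        # this snippet. A minimal sketch of what lambda/lambda-handler.py might
        # contain (an assumption, not the author's code):
        #
        #     import json
        #
        #     def handler(event, context):
        #         # LambdaRestApi proxies every request here; echo the path back.
        #         return {
        #             "statusCode": 200,
        #             "headers": {"Content-Type": "application/json"},
        #             "body": json.dumps({"path": event.get("path", "/")}),
        #         }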
    def __init__(self, scope: core.Construct, id: str, cidr_range: str,
                 tgw_asn: int, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC Creation
        self.vpc = ec2.Vpc(
            self,
            f"{kwargs['env']['region']}-vpc",
            max_azs=1,
            cidr=cidr_range,
            # configuration will create 1 subnet in a single AZ.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="Isolated",
                                        cidr_mask=25)
            ])

        # Transit Gateway creation
        self.tgw = ec2.CfnTransitGateway(
            self,
            id=f"TGW-{kwargs['env']['region']}",
            amazon_side_asn=tgw_asn,
            auto_accept_shared_attachments="enable",
            default_route_table_association="enable",
            default_route_table_propagation="enable",
            tags=[
                core.CfnTag(key='Name', value=f"tgw-{kwargs['env']['region']}")
            ])

        # Transit Gateway attachment to the VPC
        self.tgw_attachment = ec2.CfnTransitGatewayAttachment(
            self,
            id=f"tgw-vpc-{kwargs['env']['region']}",
            transit_gateway_id=self.tgw.ref,
            vpc_id=self.vpc.vpc_id,
            subnet_ids=[
                subnet.subnet_id for subnet in self.vpc.isolated_subnets
            ],
            tags=[
                core.CfnTag(key='Name',
                            value=f"tgw-{self.vpc.vpc_id}-attachment")
            ])

        # VPC Endpoint creation for SSM (3 Endpoints needed)
        ec2.InterfaceVpcEndpoint(
            self,
            "VPCe - SSM",
            service=ec2.InterfaceVpcEndpointService(
                core.Fn.sub("com.amazonaws.${AWS::Region}.ssm")),
            private_dns_enabled=True,
            vpc=self.vpc,
        )

        ec2.InterfaceVpcEndpoint(
            self,
            "VPCe - EC2 Messages",
            service=ec2.InterfaceVpcEndpointService(
                core.Fn.sub("com.amazonaws.${AWS::Region}.ec2messages")),
            private_dns_enabled=True,
            vpc=self.vpc,
        )

        ec2.InterfaceVpcEndpoint(
            self,
            "VPCe - SSM Messages",
            service=ec2.InterfaceVpcEndpointService(
                core.Fn.sub("com.amazonaws.${AWS::Region}.ssmmessages")),
            private_dns_enabled=True,
            vpc=self.vpc,
        )
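        # Not shown in this snippet: for traffic to actually flow through the
        # Transit Gateway, each isolated subnet's route table needs a route to
        # it. A hedged sketch follows; the destination CIDR is a placeholder
        # for the remote networks reachable via the TGW.
        for i, subnet in enumerate(self.vpc.isolated_subnets):
            tgw_route = ec2.CfnRoute(
                self,
                f"TgwRoute{i}",
                route_table_id=subnet.route_table.route_table_id,
                destination_cidr_block="10.0.0.0/8",  # placeholder remote CIDR
                transit_gateway_id=self.tgw.ref,
            )
            # The route can only be created once the attachment exists.
            tgw_route.add_depends_on(self.tgw_attachment)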
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        public_subnets = ec2.SubnetConfiguration(
            name="Public", subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24)

        private_subnets = ec2.SubnetConfiguration(
            name="Tier2",
            subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/22',
                      subnet_configuration=[public_subnets, private_subnets])

        fargate_service_security_group = ec2.SecurityGroup(
            self,
            'fargate_service_security_group',
            description='Security Group for the Fargate Service',
            allow_all_outbound=True,
            vpc=vpc)

        # create ECS Cluster
        ecs_cluster = ecs.Cluster(self,
                                  'AWS-Cookbook-EcsCluster',
                                  cluster_name='awscookbook207',
                                  vpc=vpc)

        FargateTask = ecs.FargateTaskDefinition(
            self,
            'FargateTask',
            cpu=256,
            memory_limit_mib=512,
        )

        ContainerDef = ecs.ContainerDefinition(
            self,
            'ContainerDef',
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            task_definition=FargateTask,
        )

        ContainerDef.add_port_mappings(ecs.PortMapping(container_port=80))

        ecs.FargateService(
            self,
            'awscookbook207Service',
            cluster=ecs_cluster,
            task_definition=FargateTask,
            assign_public_ip=False,
            desired_count=1,
            enable_ecs_managed_tags=False,
            # health_check_grace_period=core.Duration.seconds(60),
            max_healthy_percent=100,
            min_healthy_percent=0,
            platform_version=ecs.FargatePlatformVersion.LATEST,
            security_groups=[fargate_service_security_group],
            service_name='awscookbook207Service',
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT))

        # outputs

        CfnOutput(self, 'VpcId', value=vpc.vpc_id)

        CfnOutput(self, 'EcsClusterName', value=ecs_cluster.cluster_name)

        public_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC)

        CfnOutput(self,
                  'VpcPublicSubnets',
                  value=', '.join(map(str, public_subnets.subnet_ids)))

        CfnOutput(self,
                  'AppSgId',
                  value=fargate_service_security_group.security_group_id)

        CfnOutput(self,
                  'ContainerIp',
                  value=fargate_service_security_group.security_group_id)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # ==============================
        # ======= CFN PARAMETERS =======
        # ==============================
        project_name_param = core.CfnParameter(scope=self,
                                               id='ProjectName',
                                               type='String')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = f'{project_name_param.value_as_string}-artifacts-{core.Aws.ACCOUNT_ID}'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # ==================================================
        # ================= IAM ROLE =======================
        # ==================================================
        role = iam.Role(
            scope=self,
            id='TASKROLE',
            assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3FullAccess'))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonECS_FullAccess'))

        # ==================================================
        # ================== SECRET ========================
        # ==================================================
        db_password_secret = sm.Secret(
            scope=self,
            id='DBSECRET',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(
                password_length=20, exclude_punctuation=True))

        # ==================================================
        # ==================== VPC =========================
        # ==================================================
        public_subnet = ec2.SubnetConfiguration(
            name='Public', subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=28)
        private_subnet = ec2.SubnetConfiguration(
            name='Private', subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=28)
        isolated_subnet = ec2.SubnetConfiguration(
            name='DB', subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=28)

        vpc = ec2.Vpc(scope=self,
                      id='VPC',
                      cidr='10.0.0.0/24',
                      max_azs=2,
                      nat_gateway_provider=ec2.NatProvider.gateway(),
                      nat_gateways=1,
                      subnet_configuration=[
                          public_subnet, private_subnet, isolated_subnet
                      ])
        vpc.add_gateway_endpoint('S3Endpoint',
                                 service=ec2.GatewayVpcEndpointAwsService.S3)
        # ==================================================
        # ================= S3 BUCKET ======================
        # ==================================================
        artifact_bucket = s3.Bucket(
            scope=self,
            id='ARTIFACTBUCKET',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY)
        # ==================================================
        # ================== DATABASE ======================
        # ==================================================
        # Creates a security group for AWS RDS
        sg_rds = ec2.SecurityGroup(scope=self,
                                   id='SGRDS',
                                   vpc=vpc,
                                   security_group_name='sg_rds')
        # Adds an ingress rule which allows resources in the VPC's CIDR to access the database.
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.0.0.0/24'),
                                connection=ec2.Port.tcp(port))

        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(
                username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.SMALL),
            vpc=vpc,
            security_groups=[sg_rds],
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False)
        # ==================================================
        # =============== FARGATE SERVICE ==================
        # ==================================================
        cluster = ecs.Cluster(scope=self,
                              id='CLUSTER',
                              cluster_name=cluster_name,
                              vpc=vpc)

        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,
        )

        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='container', repository_name=container_repo_name),
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            })
        port_mapping = ecs.PortMapping(container_port=5000,
                                       host_port=5000,
                                       protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition)

        # Setup security group
        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(
                peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                connection=ec2.Port.tcp(5000),
                description='Allow inbound from VPC for mlflow')

        # Setup autoscaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60))
        # ==================================================
        # =================== OUTPUTS ======================
        # ==================================================
        core.CfnOutput(
            scope=self,
            id='LoadBalancerDNS',
            value=fargate_service.load_balancer.load_balancer_dns_name)
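        # The 'container' directory (Dockerfile and entrypoint) is not included
        # in this snippet. The environment variables and secret passed to the
        # container above are presumably consumed roughly like the hypothetical
        # entrypoint sketch below, which assembles the backend-store URI and
        # starts the MLflow tracking server:
        #
        #     import os
        #     import subprocess
        #
        #     uri = (f"mysql+pymysql://{os.environ['USERNAME']}:{os.environ['PASSWORD']}"
        #            f"@{os.environ['HOST']}:{os.environ['PORT']}/{os.environ['DATABASE']}")
        #     subprocess.run([
        #         "mlflow", "server",
        #         "--host", "0.0.0.0", "--port", "5000",
        #         "--backend-store-uri", uri,
        #         "--default-artifact-root", os.environ["BUCKET"],
        #     ], check=True)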
Example #16
    def __init__(self, scope: core.Construct, id: str, x86_ecr_repo_name: str,
                 arm_ecr_repo_name: str, spec_file_path: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define CodeBuild resource.
        git_hub_source = codebuild.Source.git_hub(
            owner=GITHUB_REPO_OWNER,
            repo=GITHUB_REPO_NAME,
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(
                    codebuild.EventAction.PULL_REQUEST_CREATED,
                    codebuild.EventAction.PULL_REQUEST_UPDATED,
                    codebuild.EventAction.PULL_REQUEST_REOPENED)
            ],
            clone_depth=1)

        # Define a IAM role for this stack.
        code_build_batch_policy = iam.PolicyDocument.from_json(
            code_build_batch_policy_in_json([id]))
        fuzz_policy = iam.PolicyDocument.from_json(
            code_build_fuzz_policy_in_json())
        inline_policies = {
            "code_build_batch_policy": code_build_batch_policy,
            "fuzz_policy": fuzz_policy
        }
        role = iam.Role(
            scope=self,
            id="{}-role".format(id),
            assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
            inline_policies=inline_policies)

        # Create the VPC for EFS and CodeBuild
        public_subnet = ec2.SubnetConfiguration(
            name="PublicFuzzingSubnet", subnet_type=ec2.SubnetType.PUBLIC)
        private_subnet = ec2.SubnetConfiguration(
            name="PrivateFuzzingSubnet", subnet_type=ec2.SubnetType.PRIVATE)

        # Create a VPC with a single public and private subnet in a single AZ. This is to avoid the elastic IP limit
        # being used up by a bunch of idle NAT gateways
        fuzz_vpc = ec2.Vpc(
            scope=self,
            id="{}-FuzzingVPC".format(id),
            subnet_configuration=[public_subnet, private_subnet],
            max_azs=1)
        build_security_group = ec2.SecurityGroup(
            scope=self, id="{}-FuzzingSecurityGroup".format(id), vpc=fuzz_vpc)

        build_security_group.add_ingress_rule(
            peer=build_security_group,
            connection=ec2.Port.all_traffic(),
            description="Allow all traffic inside security group")

        efs_subnet_selection = ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PRIVATE)

        # Create the EFS to store the corpus and logs. EFS allows new filesystems to burst to 100 MB/s for the first 2
        # TB of data read/written, after that the rate is limited based on the size of the filesystem. As of late
        # 2021 our corpus is less than one GB which results in EFS limiting all reads and writes to the minimum 1 MB/s.
        # To let the fuzzing finish in a reasonable amount of time, use the Provisioned Throughput option.
        # For now this uses 100 MB/s which matches the performance used for 2021. Looking at EFS metrics in late 2021
        # during fuzz runs EFS sees 4-22 MB/s of transfers thus 100 MB/s gives lots of buffer and allows ~4-5 fuzz runs
        # to start at the same time with no issue.
        # https://docs.aws.amazon.com/efs/latest/ug/performance.html
        fuzz_filesystem = efs.FileSystem(
            scope=self,
            id="{}-FuzzingEFS".format(id),
            file_system_name="AWS-LC-Fuzz-Corpus",
            enable_automatic_backups=True,
            encrypted=True,
            security_group=build_security_group,
            vpc=fuzz_vpc,
            vpc_subnets=efs_subnet_selection,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.PROVISIONED,
            provisioned_throughput_per_second=core.Size.mebibytes(100),
        )

        # Create build spec.
        placeholder_map = {
            "X86_ECR_REPO_PLACEHOLDER": ecr_arn(x86_ecr_repo_name),
            "ARM_ECR_REPO_PLACEHOLDER": ecr_arn(arm_ecr_repo_name)
        }
        build_spec_content = YmlLoader.load(spec_file_path, placeholder_map)

        # Define CodeBuild.
        fuzz_codebuild = codebuild.Project(
            scope=self,
            id="FuzzingCodeBuild",
            project_name=id,
            source=git_hub_source,
            role=role,
            timeout=core.Duration.minutes(120),
            environment=codebuild.BuildEnvironment(
                compute_type=codebuild.ComputeType.LARGE,
                privileged=True,
                build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
            build_spec=codebuild.BuildSpec.from_object(build_spec_content),
            vpc=fuzz_vpc,
            security_groups=[build_security_group])

        # TODO: add build type BUILD_BATCH when CFN finishes the feature release. See CryptoAlg-575.

        # Add 'BuildBatchConfig' property, which is not supported in CDK.
        # CDK raw overrides: https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html#aws-resource-codebuild-project-properties
        cfn_codebuild = fuzz_codebuild.node.default_child
        cfn_codebuild.add_override("Properties.BuildBatchConfig", {
            "ServiceRole": role.role_arn,
            "TimeoutInMins": 120
        })

        # The EFS identifier needs to match tests/ci/common_fuzz.sh, CodeBuild defines an environment variable named
        # codebuild_$identifier.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-projectfilesystemlocation.html
        #
        # TODO: add this to the CDK project above when it supports EfsFileSystemLocation
        cfn_codebuild.add_override("Properties.FileSystemLocations", [{
            "Identifier":
            "fuzzing_root",
            "Location":
            "%s.efs.%s.amazonaws.com:/" %
            (fuzz_filesystem.file_system_id, AWS_REGION),
            "MountPoint":
            "/efs_fuzzing_root",
            "Type":
            "EFS"
        }])
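# ecr_arn, YmlLoader, the GITHUB_* constants and the two policy builders used
# above are project helpers that are not part of this snippet. A minimal sketch
# of the two referenced here, as an assumption rather than the project's actual
# code:
import yaml  # PyYAML


def ecr_arn(repo_name: str) -> str:
    # Hypothetical helper: build the repository ARN from the region constant
    # already referenced in the snippet and the deploying account.
    return f"arn:aws:ecr:{AWS_REGION}:{core.Aws.ACCOUNT_ID}:repository/{repo_name}"


class YmlLoader:
    @staticmethod
    def load(file_path: str, placeholder_map: dict) -> dict:
        # Replace the ECR placeholders in the raw buildspec, then parse the YAML.
        with open(file_path) as handle:
            content = handle.read()
        for placeholder, value in placeholder_map.items():
            content = content.replace(placeholder, value)
        return yaml.safe_load(content)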
Example #17
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # The code that defines your stack goes here

        vpc = ec2.Vpc(
            self,
            "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # configuration will create 3 groups in 2 AZs = 6 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="DB",
                                        cidr_mask=24)
            ],
            nat_gateways=2)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string
        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        user_defined_tags = self.node.try_get_context("tags")

        if user_defined_tags:
            tags = user_defined_tags.split(' ')
            core.Tags.of(asg).add(*tags)

        # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # We must manually modify the inbound rules of the newly created ASG security group to allow access
        # from the NLB IPs only
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(18083),
                                            "Allow NLB access WEB UI")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")

        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")
        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        # @todo we need ssl termination
        # listenerUI.add_targets("addTargetGroup",
        #     port=18083,
        #     targets=[asg])
        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
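# linux_ami, user_data, HealthCheck and Duration are module-level names in the
# original project and are not shown here. A plausible sketch of how they might
# be defined (an assumption, not the author's exact code):
from aws_cdk.aws_autoscaling import HealthCheck
from aws_cdk.core import Duration

linux_ami = ec2.AmazonLinuxImage(
    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
    edition=ec2.AmazonLinuxEdition.STANDARD,
    virtualization=ec2.AmazonLinuxVirt.HVM,
    storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

# Shell script that installs and starts EMQX on each instance; the path is
# hypothetical.
with open("./user_data/emqx_install.sh") as user_data_file:
    user_data = user_data_file.read()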
Example #18
    def __init__(self, scope: core.Construct, construct_id: str, env, **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)
        
        # The code that defines your stack goes here
        if self.node.try_get_context("tags"):
            self.user_defined_tags = self.node.try_get_context("tags").split(' ')
        else:
            self.user_defined_tags = None

        vpc = ec2.Vpc(self, "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # configuration will create 3 groups in 2 AZs = 6 subnets.
            subnet_configuration=[ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PUBLIC,
                name="Public",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PRIVATE,
                name="Private",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.ISOLATED,
                name="DB",
                cidr_mask=24
            )
            ],
            nat_gateways=2
            )
        self.vpc = vpc


        # Route53
        int_zone = r53.PrivateHostedZone(self, r53_zone_name,
                                         zone_name = 'int.emqx',
                                         vpc = vpc
        )

        self.int_zone = int_zone

        # Define cfn parameters
        # ec2_type = CfnParameter(self, "ec2-instance-type",
        #     type="String", default="m5.2xlarge",
        #     description="Specify the instance type you want").value_as_string
        
        key_name = CfnParameter(self, "ssh key",
            type="String", default="key_ireland",
            description="Specify your SSH key").value_as_string

        sg = ec2.SecurityGroup(self, id = 'sg_int', vpc = vpc)
        self.sg = sg

        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH from anywhere')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(1883), 'MQTT TCP Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8883), 'MQTT TCP/TLS Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(14567), 'MQTT Quic Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(18083), 'WEB UI')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4369), 'EMQX dist port 1')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4370), 'EMQX dist port 2')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8081), 'EMQX dashboard')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2379), 'etcd client port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2380), 'etcd peer port')

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(self, "Bastion",
                                       vpc=vpc,
                                       subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                       instance_name="BastionHostLinux",
                                       instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")
    
        # Create NLB
        nlb = elb.NetworkLoadBalancer(self, "emq-elb",
                                      vpc=vpc,
                                      internet_facing=False, 
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        r53.ARecord(self, "AliasRecord",
                    zone = int_zone,
                    record_name = loadbalancer_dnsname,
                    target = r53.RecordTarget.from_alias(r53_targets.LoadBalancerTarget(nlb))
                    )

        self.nlb = nlb

        listener = nlb.add_listener("port1883", port=1883)
        listenerTLS = nlb.add_listener("port8883", port=8883) # TLS, emqx termination
        listenerQuic = nlb.add_listener("port14567", port=14567, protocol=elbv2.Protocol.UDP)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        # asg = autoscaling.AutoScalingGroup(self, "emq-asg",
        #                                    vpc=vpc,
        #                                    vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        #                                    instance_type=ec2.InstanceType(
        #                                        instance_type_identifier=ec2_type),
        #                                    machine_image=linux_ami,
        #                                    security_group = sg,
        #                                    key_name=key_name,
        #                                    user_data=ec2.UserData.custom(user_data),
        #                                    health_check=HealthCheck.elb(grace=Duration.seconds(60)),
        #                                    desired_capacity=3,
        #                                    min_capacity=2,
        #                                    max_capacity=4
        # )

        # if self.user_defined_tags:
        #     core.Tags.of(asg).add(*self.user_defined_tags)

        # # NLB cannot associate with a security group therefore NLB object has no Connections object
        # # Must manually modify the inbound rule of the newly created asg security group to allow access
        # # from NLB IP only
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(1883), "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(18083), "Allow NLB access WEB UI")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(8081), "Allow emqx cluster dashboard access")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from(bastion,
        #     ec2.Port.tcp(22), "Allow SSH from the bastion only")

        self.setup_emqx(numEmqx, vpc, int_zone, sg, key_name)

        listener.add_targets('ec2',
                             port=1883,
                             targets=
                                 [ target.InstanceTarget(x)
                                   for x in self.emqx_vms])
        # @todo we need ssl termination
        listenerUI.add_targets('ec2',
                               port=18083,
                               targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerQuic.add_targets('ec2',
                                 port=14567,
                                 protocol=elbv2.Protocol.UDP,
                                 targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerTLS.add_targets('ec2',
                                port=8883,
                                targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        #self.setup_monitoring()

        self.setup_etcd(vpc, int_zone, sg, key_name)
        self.setup_loadgen(numLg, vpc, int_zone, sg, key_name, nlb.load_balancer_dns_name)

        self.setup_monitoring()

        core.CfnOutput(self, "Output",
            value=nlb.load_balancer_dns_name)
        core.CfnOutput(self, "SSH Entrypoint",
                       value=bastion.instance_public_ip)
        core.CfnOutput(self, "SSH cmds",
                       value="ssh -A -l ec2-user %s -L8888:%s:80 -L 9999:%s:80 -L 13000:%s:3000"
                       % (bastion.instance_public_ip, nlb.load_balancer_dns_name, self.mon_lb, self.mon_lb)
        )
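    # setup_emqx, setup_etcd, setup_loadgen and setup_monitoring are methods of
    # this stack that are defined outside the snippet. A heavily hedged sketch
    # of the shape setup_emqx might take; the instance type, AMI and record
    # names are assumptions:
    def setup_emqx(self, num_emqx, vpc, zone, sg, key_name):
        self.emqx_vms = []
        for n in range(num_emqx):
            vm = ec2.Instance(
                self,
                f"emqx-{n}",
                instance_type=ec2.InstanceType("t3.large"),
                machine_image=ec2.MachineImage.latest_amazon_linux(
                    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE),
                security_group=sg,
                key_name=key_name)
            self.emqx_vms.append(vm)
            # Register each node in the private hosted zone so the cluster can
            # discover its peers by name.
            r53.ARecord(
                self,
                f"emqx-dns-{n}",
                zone=zone,
                record_name=f"emqx-{n}",
                target=r53.RecordTarget.from_ip_addresses(
                    vm.instance_private_ip))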
Example #19
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        attachments_subnets = ec2.SubnetConfiguration(
                name="ATTACHMENTS",
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
                cidr_mask=28)

        isolated_subnets = ec2.SubnetConfiguration(
                name="ISOLATED",
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
                cidr_mask=28)

        public_subnets = ec2.SubnetConfiguration(
                name="PUBLIC",
                subnet_type=ec2.SubnetType.PUBLIC,
                cidr_mask=28)

        private_subnets = ec2.SubnetConfiguration(
                name="PRIVATE",
                subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,
                cidr_mask=28)

        # create VPC
        vpc1 = ec2.Vpc(
            self,
            'AWS-Cookbook-210-VPC1',
            cidr='10.10.0.0/26',
            subnet_configuration=[isolated_subnets, attachments_subnets]
        )

        vpc1.add_interface_endpoint(
            'VPC1SSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc1.add_interface_endpoint(
            'VPC1EC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )
        vpc1.add_interface_endpoint(
            'VPC1SSMMessagedInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )
        vpc2 = ec2.Vpc(
            self,
            'AWS-Cookbook-210-VPC2',
            cidr='10.10.0.128/25',
            subnet_configuration=[public_subnets, private_subnets, isolated_subnets, attachments_subnets]
        )

        vpc2.add_interface_endpoint(
            'VPC2SSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc2.add_interface_endpoint(
            'VPC2EC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )
        vpc2.add_interface_endpoint(
            'VPC2SSMMessagedInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc3 = ec2.Vpc(
            self,
            'AWS-Cookbook-210-VPC3',
            cidr='10.10.0.64/26',
            subnet_configuration=[isolated_subnets, attachments_subnets]
        )

        vpc3.add_interface_endpoint(
            'VPC3SSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc3.add_interface_endpoint(
            'VPC3EC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )
        vpc3.add_interface_endpoint(
            'VPC3SSMMessagedInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )

        iam_role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2RoleforSSM"))

        security_group_instance1 = ec2.SecurityGroup(
            self,
            "Instance1SG",
            vpc=vpc1,
            allow_all_outbound=True
        )
        security_group_instance1.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.all_icmp()
        )

        instance1 = ec2.Instance(
            self,
            "Instance1",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc1,
            security_group=security_group_instance1
        )

        security_group_instance2 = ec2.SecurityGroup(
            self,
            "Instance2SG",
            vpc=vpc2,
            allow_all_outbound=True
        )
        security_group_instance2.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.all_icmp()
        )

        instance2 = ec2.Instance(
            self,
            "Instance2",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc2,
            security_group=security_group_instance2
        )

        security_group_instance3 = ec2.SecurityGroup(
            self,
            "Instance3SG",
            vpc=vpc3,
            allow_all_outbound=True
        )
        security_group_instance3.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.all_icmp()
        )

        instance3 = ec2.Instance(
            self,
            "Instance3",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc3,
            security_group=security_group_instance3
        )

        # outputs

        CfnOutput(
            self,
            'VpcId1',
            value=vpc1.vpc_id
        )

        CfnOutput(
            self,
            'VpcId2',
            value=vpc2.vpc_id
        )

        CfnOutput(
            self,
            'VpcId3',
            value=vpc3.vpc_id
        )

        CfnOutput(
            self,
            'InstanceId1',
            value=instance1.instance_id
        )

        CfnOutput(
            self,
            'InstanceId2',
            value=instance2.instance_id
        )

        CfnOutput(
            self,
            'InstanceId3',
            value=instance3.instance_id
        )

        vpc1_attachment_subnets = vpc1.select_subnets(subnet_group_name="ATTACHMENTS")

        CfnOutput(
            self,
            'AttachmentSubnetsVpc1',
            value=', '.join(map(str, vpc1_attachment_subnets.subnet_ids))
        )

        vpc2_attachment_subnets = vpc2.select_subnets(subnet_group_name="ATTACHMENTS")

        CfnOutput(
            self,
            'AttachmentSubnetsVpc2',
            value=', '.join(map(str, vpc2_attachment_subnets.subnet_ids))
        )

        vpc3_attachment_subnets = vpc3.select_subnets(subnet_group_name="ATTACHMENTS")

        CfnOutput(
            self,
            'AttachmentSubnetsVpc3',
            value=', '.join(map(str, vpc3_attachment_subnets.subnet_ids))
        )

        vpc1_isolated_subnets_list = vpc1.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)

        CfnOutput(
            self,
            'Vpc1RtId1',
            value=vpc1_isolated_subnets_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'Vpc1RtId2',
            value=vpc1_isolated_subnets_list.subnets[1].route_table.route_table_id
        )

        vpc2_public_subnets_list = vpc2.select_subnets(subnet_type=ec2.SubnetType.PUBLIC)

        CfnOutput(
            self,
            'Vpc2PublicSubnetId1',
            value=vpc2_public_subnets_list.subnets[0].subnet_id
        )

        CfnOutput(
            self,
            'Vpc2PublicSubnetId2',
            value=vpc2_public_subnets_list.subnets[1].subnet_id
        )

        CfnOutput(
            self,
            'Vpc2PublicRtId1',
            value=vpc2_public_subnets_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'Vpc2PublicRtId2',
            value=vpc2_public_subnets_list.subnets[1].route_table.route_table_id
        )

        vpc2_private_subnets_list = vpc2.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT)

        CfnOutput(
            self,
            'Vpc2RtId1',
            value=vpc2_private_subnets_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'Vpc2RtId2',
            value=vpc2_private_subnets_list.subnets[1].route_table.route_table_id
        )

        vpc3_isolated_subnets_list = vpc3.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)

        CfnOutput(
            self,
            'Vpc3RtId1',
            value=vpc3_isolated_subnets_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'Vpc3RtId2',
            value=vpc3_isolated_subnets_list.subnets[1].route_table.route_table_id
        )

        vpc2_attachment_subnets_list = vpc2.select_subnets(subnet_group_name="ATTACHMENTS")

        CfnOutput(
            self,
            'Vpc2AttachRtId1',
            value=vpc2_attachment_subnets_list.subnets[0].route_table.route_table_id
        )

        CfnOutput(
            self,
            'Vpc2AttachRtId2',
            value=vpc2_attachment_subnets_list.subnets[1].route_table.route_table_id
        )
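        # The Transit Gateway itself is created outside this snippet (the
        # cookbook recipe wires it up afterwards using the subnet and route
        # table IDs exported above). Staying in CDK, the attachments could look
        # roughly like the sketch below; the ASN is a placeholder:
        tgw = ec2.CfnTransitGateway(self, "Tgw210", amazon_side_asn=64512)
        for name, attach_vpc in [("Vpc1", vpc1), ("Vpc2", vpc2), ("Vpc3", vpc3)]:
            ec2.CfnTransitGatewayAttachment(
                self,
                f"TgwAttach{name}",
                transit_gateway_id=tgw.ref,
                vpc_id=attach_vpc.vpc_id,
                subnet_ids=attach_vpc.select_subnets(
                    subnet_group_name="ATTACHMENTS").subnet_ids)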
Example #20
    def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnets = []
        subnets.append(
            aws_ec2.SubnetConfiguration(name="DeviceSubnet",
                                        subnet_type=aws_ec2.SubnetType.PUBLIC,
                                        cidr_mask=24))

        vpc = aws_ec2.Vpc(self,
                          "DeviceVpc",
                          max_azs=2,
                          subnet_configuration=subnets)

        # Select the public subnets (used later when launching the device tasks)
        selection = vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PUBLIC)

        sg = aws_ec2.SecurityGroup(
            self,
            id="FarGateSecGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description="Allow access to virtual device",
            security_group_name="Virtual Device Security Group")

        sg.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                            connection=aws_ec2.Port.tcp(80))

        rnd_suffix = create_random_name(4).lower()

        # pipeline requires versioned bucket
        bucket = aws_s3.Bucket(self,
                               "SourceBucket",
                               bucket_name="{}-{}-{}".format(
                                   props['namespace'].lower(),
                                   core.Aws.ACCOUNT_ID, core.Aws.REGION),
                               versioned=True,
                               removal_policy=core.RemovalPolicy.DESTROY)

        # ssm parameter to get bucket name later
        bucket_param = aws_ssm.StringParameter(
            self,
            "ParameterBucketName",
            parameter_name=f"{props['namespace']}-bucket",
            string_value=bucket.bucket_name,
            description='IoT playground pipeline bucket')

        # ecr repo to push docker container into
        ecr = aws_ecr.Repository(self,
                                 "ECR",
                                 repository_name=f"{props['namespace']}",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        # codebuild project meant to run in pipeline
        cb_docker_build = aws_codebuild.PipelineProject(
            self,
            "DockerBuild",
            project_name=f"{props['namespace']}-Docker-Build",
            build_spec=aws_codebuild.BuildSpec.from_source_filename(
                filename='docker/docker_build_buildspec.yml'),
            environment=aws_codebuild.BuildEnvironment(privileged=True, ),

            # pass the ecr repo uri into the codebuild project so codebuild knows where to push
            environment_variables={
                'ecr':
                aws_codebuild.BuildEnvironmentVariable(
                    value=ecr.repository_uri),
                'tag':
                aws_codebuild.BuildEnvironmentVariable(value='virtual_device')
            },
            description='Pipeline for CodeBuild',
            timeout=core.Duration.minutes(10),
        )
        # codebuild iam permissions to read write s3
        bucket.grant_read_write(cb_docker_build)

        # codebuild permissions to interact with ecr
        ecr.grant_pull_push(cb_docker_build)

        ecs_cluster = aws_ecs.Cluster(self, 'DeviceCluster', vpc=vpc)

        fargate_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'DeviceTaskDef',
            cpu=512,
            memory_limit_mib=1024
            #network_mode=aws_ecs.NetworkMode.AWS_VPC,
        )

        # fargate_task_def.add_to_task_role_policy(aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=[
        #         "s3:PutObject"],
        #     resources=["*"]
        # ))

        fargate_task_def.add_to_execution_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=[
                                        "ecr:GetAuthorizationToken",
                                        "ecr:BatchCheckLayerAvailability",
                                        "ecr:GetDownloadUrlForLayer",
                                        "ecr:BatchGetImage",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents"
                                    ],
                                    resources=["*"]))

        container_image = aws_ecs.EcrImage(repository=ecr,
                                           tag="virtual_device")

        logging = aws_ecs.AwsLogDriver(stream_prefix="virtual_device")

        container = fargate_task_def.add_container("DeviceContainer",
                                                   image=container_image,
                                                   cpu=512,
                                                   memory_limit_mib=1024,
                                                   logging=logging,
                                                   essential=True)

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # The code that defines your stack goes here
        table = aws_dynamodb.Table(self,
                                   "DeviceFactoryCatalog",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=3,
                                   write_capacity=3)

        function = aws_lambda.Function(
            self,
            "DeviceFactoryLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../lambdas/device_factory_lambda"),
            timeout=core.Duration.minutes(1))

        function.add_environment("BUCKET_NAME", bucket.bucket_name)
        function.add_environment("ECS_CLUSTER", ecs_cluster.cluster_name)
        function.add_environment("ECS_TASK_DEF",
                                 fargate_task_def.task_definition_arn)
        function.add_environment("DDB_TABLE_DEVICE_CATALOG", table.table_name)
        function.add_environment("SUBNET_1", selection.subnets[0].subnet_id)
        function.add_environment("SUBNET_2", selection.subnets[1].subnet_id)
        function.add_environment("SEC_GROUP", sg.security_group_id)

        table.grant_read_write_data(function)

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iot:*"],
                                    resources=["*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:PutObject", "s3:GetObject"],
                resources=["{}/*".format(bucket.bucket_arn)]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iam:PassRole"],
                                    resources=["arn:aws:iam::*:role/*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["ecs:RunTask", "ecs:StopTask"],
                                    resources=["*"]))

        api_gtw = aws_apigateway.LambdaRestApi(
            self,
            id="DeviceFactoryApi",
            rest_api_name="DeviceFactoryApi",
            handler=function)

        # ssm parameter to get the api endpoint later
        device_factory_endpoint_param = aws_ssm.StringParameter(
            self,
            "ParameterDeviceFactoryEndpoint",
            parameter_name=f"{props['namespace']}-devicefactoryendpoint",
            string_value=api_gtw.url,
            description='IoT playground device factory endpoint')

        # ssm parameter to get the ecr uri later
        ecr_uri_param = aws_ssm.StringParameter(
            self,
            "ParameterEcrUri",
            parameter_name=f"{props['namespace']}-ecruri",
            string_value=ecr.repository_uri,
            description='IoT playground ECR URI')

        # ssm parameter to get the cluster name later
        cluster_name_param = aws_ssm.StringParameter(
            self,
            "ParameterClusterName",
            parameter_name=f"{props['namespace']}-clustername",
            string_value=ecs_cluster.cluster_name,
            description='IoT playground Cluster Name')

        core.CfnOutput(
            self,
            "EcrUri",
            description="ECR URI",
            value=ecr.repository_uri,
        )

        core.CfnOutput(self,
                       "S3Bucket",
                       description="S3 Bucket",
                       value=bucket.bucket_name)

        core.CfnOutput(self,
                       "DeviceFactoryEndpoint",
                       description="Device Factory Endpoint",
                       value=api_gtw.url)

        self.output_props = props.copy()
        self.output_props['bucket'] = bucket
        self.output_props['cb_docker_build'] = cb_docker_build
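The stack above only wires the device-factory Lambda up with environment variables (ECS_CLUSTER, ECS_TASK_DEF, SUBNET_1/2, SEC_GROUP) and the ecs:RunTask / iam:PassRole permissions it needs; the handler code in ../lambdas/device_factory_lambda is not shown. A minimal, hypothetical sketch of such a handler built on boto3 (the function body and response shape are assumptions, not the actual code):

import json
import os

import boto3

ecs = boto3.client("ecs")


def lambda_handler(event, context):
    # Hypothetical: launch one Fargate task per API request using the values
    # the CDK stack injected as environment variables.
    response = ecs.run_task(
        cluster=os.environ["ECS_CLUSTER"],
        taskDefinition=os.environ["ECS_TASK_DEF"],
        launchType="FARGATE",
        count=1,
        networkConfiguration={
            "awsvpcConfiguration": {
                "subnets": [os.environ["SUBNET_1"], os.environ["SUBNET_2"]],
                "securityGroups": [os.environ["SEC_GROUP"]],
                "assignPublicIp": "DISABLED",
            }
        },
    )
    return {"statusCode": 200,
            "body": json.dumps(response["tasks"][0]["taskArn"])}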
Example #21
0
    def __init__(self, scope: core.Construct, id: str, props: dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.max_azs = 3

        vpc = ec2.Vpc(
            self,
            f"{id}-{props['deploy_env']}-vpc",
            cidr=props['cidr'],
            default_instance_tenancy=ec2.DefaultInstanceTenancy.DEFAULT,
            enable_dns_hostnames=props['aws_dns'],
            enable_dns_support=props['aws_dns'],
            max_azs=self.max_azs,
            nat_gateways=self.max_azs if props['ha'] else 1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name='public',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=20),
                ec2.SubnetConfiguration(name='app',
                                        subnet_type=ec2.SubnetType.PRIVATE,
                                        cidr_mask=20),
                ec2.SubnetConfiguration(name='data',
                                        subnet_type=ec2.SubnetType.PRIVATE,
                                        cidr_mask=20)
            ])

        flowlog_log_group = logs.LogGroup(
            self,
            f"{id}-{props['deploy_env']}-flowlog-log-group",
            log_group_name=f"/flowlogs/{props['deploy_env']}",
            retention=logs.RetentionDays.ONE_MONTH)

        iam_policy = iam.PolicyDocument(
            assign_sids=True,
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream", "logs:PutLogEvents",
                        "logs:DescribeLogGroups", "logs:DescribeLogStreams"
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[flowlog_log_group.log_group_arn])
            ])

        iam_role = iam.Role(
            self,
            f"{id}-{props['deploy_env']}-flowlog-role",
            assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'),
            inline_policies={
                f"{id}-{props['deploy_env']}-flowlogs": iam_policy
            })

        flowlog = ec2.CfnFlowLog(
            self,
            f"{id}-{props['deploy_env']}-flowlog",
            deliver_logs_permission_arn=iam_role.role_arn,
            log_destination_type='cloud-watch-logs',
            log_group_name=f"/flowlogs/{props['deploy_env']}",
            traffic_type='ALL',
            resource_type='VPC',
            resource_id=vpc.vpc_id)
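        # Aside (a hedged sketch, not part of the original example): newer versions of
        # aws-cdk.aws-ec2 expose a higher-level FlowLog construct that creates the same
        # wiring without hand-writing the IAM policy, roughly:
        #   ec2.FlowLog(
        #       self, f"{id}-{props['deploy_env']}-flowlog-l2",
        #       resource_type=ec2.FlowLogResourceType.from_vpc(vpc),
        #       destination=ec2.FlowLogDestination.to_cloud_watch_logs(
        #           flowlog_log_group, iam_role),
        #       traffic_type=ec2.FlowLogTrafficType.ALL)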
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        env = kwargs.get('env', {})
        if env['region'] == 'us-east-1':  # simple AD not in us-east-2
            # The VPC for simple AD
            simple_vpc = ec2.Vpc(self,
                                 'devassoc-auth-simple',
                                 max_azs=2,
                                 cidr='10.40.0.0/16',
                                 subnet_configuration=[
                                     ec2.SubnetConfiguration(
                                         name='simple-ad-demo-',
                                         subnet_type=ec2.SubnetType.PUBLIC,
                                         cidr_mask=24)
                                 ])
            core.Tags.of(simple_vpc).add('Name', 'devassoc-simple-ad-demo')

            self.vpc_id = simple_vpc.vpc_id
            core.CfnOutput(self, 'simple-vpc-id', value=simple_vpc.vpc_id)
            core.CfnOutput(self,
                           'simple-public-subnet-id-1',
                           value=simple_vpc.public_subnets[0].subnet_id)
            core.CfnOutput(
                self,
                'simple-public-subnet-az-1',
                value=simple_vpc.public_subnets[0].availability_zone)
            core.CfnOutput(self,
                           'simple-public-subnet-id-2',
                           value=simple_vpc.public_subnets[1].subnet_id)
            core.CfnOutput(
                self,
                'simple-public-subnet-az-2',
                value=simple_vpc.public_subnets[1].availability_zone)

            ad.CfnSimpleAD(self,
                           'simple-ad',
                           name='simple-ad-demo',
                           password='******',
                           size='Small',
                           vpc_settings={
                               "vpcId":
                               simple_vpc.vpc_id,
                               "subnetIds": [
                                   simple_vpc.public_subnets[0].subnet_id,
                                   simple_vpc.public_subnets[1].subnet_id
                               ]
                           })

        # The VPC for Microsoft AD
        microsoft_vpc = ec2.Vpc(self,
                                'devassoc-auth-microsoft',
                                max_azs=2,
                                cidr='10.30.0.0/16',
                                subnet_configuration=[
                                    ec2.SubnetConfiguration(
                                        name='microsoft-ad-demo-',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24)
                                ])
        core.Tags.of(microsoft_vpc).add('Name', 'devassoc-microsoft-ad-demo')

        self.vpc_id = microsoft_vpc.vpc_id
        core.CfnOutput(self, 'microsoft-vpc-id', value=microsoft_vpc.vpc_id)
        core.CfnOutput(self,
                       'microsoft-public-subnet-id-1',
                       value=microsoft_vpc.public_subnets[0].subnet_id)
        core.CfnOutput(self,
                       'microsoft-public-subnet-az-1',
                       value=microsoft_vpc.public_subnets[0].availability_zone)
        core.CfnOutput(self,
                       'microsoft-public-subnet-id-2',
                       value=microsoft_vpc.public_subnets[1].subnet_id)
        core.CfnOutput(self,
                       'microsoft-public-subnet-az-2',
                       value=microsoft_vpc.public_subnets[1].availability_zone)

        ad.CfnMicrosoftAD(
            self,
            'microsoft-ad',
            name='corp.example.com',  # must be valid as a DNS name
            short_name='corp',  # console calls this "Directory NetBIOS name"
            password='******',
            edition='Standard',
            vpc_settings={
                "vpcId":
                microsoft_vpc.vpc_id,
                "subnetIds": [
                    microsoft_vpc.public_subnets[0].subnet_id,
                    microsoft_vpc.public_subnets[1].subnet_id
                ]
            })

        # There should be a Cloud Directory example here, but I couldn't find a CDK API

        cognito_user_pool = cognito.UserPool(
            self,
            'cognito-user-pool',
            user_pool_name='admin-group',
            sign_in_aliases={'username': True})
        core.Tags.of(cognito_user_pool).add('user', 'admin-user')
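        # A hedged follow-on sketch, not in the original example: the pool above has no
        # app client yet, so nothing can actually sign in against it. A hypothetical
        # addition would be:
        cognito_user_pool_client = cognito.UserPoolClient(
            self,
            'cognito-user-pool-client',
            user_pool=cognito_user_pool,
            generate_secret=False)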
Example #23
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create SNS Topic for Operations Team
        konstone_ops_team = _sns.Topic(self,
                                       "konstoneOpsTeam",
                                       display_name="KonStone 24x7 On Watsapp? Support",
                                       topic_name="konstoneOpsTeam"
                                       )

        # Add Subscription to SNS Topic
        konstone_ops_team.add_subscription(
            _subs.EmailSubscription("*****@*****.**")
        )

        # Create a MultiAZ VPC
        vpc = _ec2.Vpc(
            self,
            "konstoneVpcId",
            cidr="10.111.0.0/16",
            max_azs=2,
            nat_gateways=0,
            subnet_configuration=[
                _ec2.SubnetConfiguration(
                    name="public", subnet_type=_ec2.SubnetType.PUBLIC
                )
            ]
        )

        # Read EC2 BootStrap Script
        try:
            with open("bootstrap_scripts/install_httpd.sh", mode="r") as file:
                user_data = file.read()
        except OSError:
            print('Unable to read UserData script')
            user_data = ""

        # Get the latest ami
        amzn_linux_ami = _ec2.MachineImage.latest_amazon_linux(
            generation=_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=_ec2.AmazonLinuxEdition.STANDARD,
            storage=_ec2.AmazonLinuxStorage.EBS,
            virtualization=_ec2.AmazonLinuxVirt.HVM
        )

        # WebServer Instance
        web_server = _ec2.Instance(self,
                                   "WebServer004Id",
                                   instance_type=_ec2.InstanceType(
                                       instance_type_identifier="t2.micro"),
                                   instance_name="WebServer004",
                                   machine_image=amzn_linux_ami,
                                   vpc=vpc,
                                   vpc_subnets=_ec2.SubnetSelection(
                                       subnet_type=_ec2.SubnetType.PUBLIC
                                   ),
                                   user_data=_ec2.UserData.custom(user_data)
                                   )

        # Allow Web Traffic to WebServer
        web_server.connections.allow_from_any_ipv4(
            _ec2.Port.tcp(80), description="Allow Web Traffic"
        )

        # Add permission to web server instance profile
        web_server.role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore")
        )

        # Read Lambda Code
        try:
            with open("serverless_stacks/lambda_src/konstone_processor.py", mode="r") as f:
                konstone_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            konstone_fn_code = ""

        # Simple Lambda Function to return event
        konstone_fn = _lambda.Function(self,
                                       "konstoneFunction",
                                       function_name="konstone_function",
                                       runtime=_lambda.Runtime.PYTHON_3_7,
                                       handler="index.lambda_handler",
                                       code=_lambda.InlineCode(
                                           konstone_fn_code),
                                       timeout=core.Duration.seconds(3),
                                       reserved_concurrent_executions=1,
                                       environment={
                                           "LOG_LEVEL": "INFO",
                                           "AUTOMATION": "SKON"
                                       }
                                       )

        # EC2 Metric for Avg. CPU
        ec2_metric_for_avg_cpu = _cloudwatch.Metric(
            namespace="AWS/EC2",
            metric_name="CPUUtilization",
            dimensions={
                "InstanceId": web_server.instance_id
            },
            period=core.Duration.minutes(5)
        )

        # Low CPU Alarm for Web Server
        low_cpu_alarm = _cloudwatch.Alarm(
            self,
            "lowCPUAlarm",
            alarm_description="Alert if CPU is less than 10%",
            alarm_name="low-cpu-alarm",
            actions_enabled=True,
            metric=ec2_metric_for_avg_cpu,
            threshold=10,
            comparison_operator=_cloudwatch.ComparisonOperator.LESS_THAN_OR_EQUAL_TO_THRESHOLD,
            evaluation_periods=1,
            datapoints_to_alarm=1,
            period=core.Duration.minutes(5),
            treat_missing_data=_cloudwatch.TreatMissingData.NOT_BREACHING
        )

        # Inform SNS on EC2 Alarm State
        low_cpu_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(
                konstone_ops_team
            )
        )

        # Create Lambda Alarm
        konstone_fn_error_alarm = _cloudwatch.Alarm(
            self,
            "konstoneFunctionErrorAlarm",
            metric=konstone_fn.metric_errors(),
            threshold=2,
            evaluation_periods=1,
            datapoints_to_alarm=1,
            period=core.Duration.minutes(5)
        )

        # Inform SNS on Lambda Alarm State
        konstone_fn_error_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(
                konstone_ops_team
            )
        )
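        # A hedged extra, not part of the original example: the metric and alarms above
        # could also be surfaced on a CloudWatch dashboard, e.g.:
        dashboard = _cloudwatch.Dashboard(self,
                                          "konstoneDashboard",
                                          dashboard_name="konstone-ops-dashboard")
        dashboard.add_widgets(
            _cloudwatch.GraphWidget(title="WebServer Avg CPU",
                                    left=[ec2_metric_for_avg_cpu]),
            _cloudwatch.AlarmWidget(title="Lambda Errors",
                                    alarm=konstone_fn_error_alarm))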
Example #24
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ip_parameter = core.CfnParameter(self,
                                         "SSRFSGAllowedIP",
                                         type="String")
        instance_kp_parameter = core.CfnParameter(
            self, "SSRFInstanceKP", type="AWS::EC2::KeyPair::KeyName")

        ssrf_s3_bucket = s3.Bucket(self, "SSRFS3Bucket")

        ssrf_s3_role = iam.Role(
            self,
            "SSRFS3Role",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        ssrf_s3_policy = iam.Policy(
            self,
            "SSRFS3Policy",
            document=iam.PolicyDocument(statements=[
                iam.PolicyStatement(actions=["s3:ListBucket"],
                                    resources=[ssrf_s3_bucket.bucket_arn]),
                iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[ssrf_s3_bucket.bucket_arn + "/*"])
            ]),
            roles=[ssrf_s3_role])

        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        ec2_machine_image = ec2.MachineImage.lookup(name="bitnami-lampstack*")

        ssrf_sg = ec2.SecurityGroup(
            self,
            "SSRFSG",
            description="Security Group that allows HTTP traffic",
            vpc=vpc)

        ssrf_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4(ip_parameter.value_as_string + "/32"),
            connection=ec2.Port.tcp(port=80))

        with open("./caponeme-cdk/user_data.sh") as f:
            ec2_user_data = f.read()

        ssrf_instance = ec2.Instance(
            self,
            "SSRFInstance",
            vpc=vpc,
            security_group=ssrf_sg,
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"),
            machine_image=ec2_machine_image,
            user_data=ec2.UserData.custom(ec2_user_data),
            role=ssrf_s3_role,
            key_name=instance_kp_parameter.value_as_string)

        output_bucket_name = core.CfnOutput(self,
                                            "SSRFS3BucketOutput",
                                            value=ssrf_s3_bucket.bucket_name)
        output_ssrf_web_url = core.CfnOutput(
            self,
            "SSRFWebURL",
            value="http://" + ssrf_instance.instance_public_dns_name)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # ELB mapping
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        elb_mapping = {}
        for key in elb_map_temp:
            elb_mapping[key] = {'accountid': elb_map_temp[key]}
        elb_accounts = core.CfnMapping(
            scope=self, id='ELBv2AccountMap', mapping=elb_mapping)
        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input the email address for the SNS topic to which '
                         'Amazon ES will send alerts'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input the reserved concurrency. Increase this value '
                         'if logs are steadily delayed despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap

        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subnet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
            vpc_aes_siem.add_interface_endpoint(
                'SNSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SNS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # snapshot role policy for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonESFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alert from Amazon ES
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonElasticsearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of aes-siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive. note that re-processing the DLQ this way can loop
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lambda_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lambda_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup elasticsearch
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(720),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon ES domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)
        aes_domain.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,
        )
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 object-created event types
        # most log systems use PUT, but CLB also uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,
        )
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Download the GeoIP DB every day at 00:20 UTC
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.cron(
                minute='20', hour='0', month='*', week_day='*', year='*'),
        )
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=elb_accounts.find_in_map(
                    core.Aws.REGION, 'accountid'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket Policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon ES Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_plugin/kibana/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'KibanaUrl', export_name='kibana-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'KibanaPassword', export_name='kibana-pass',
                       value=kibanapass,
                       description='Please change the password in Kibana ASAP')
        core.CfnOutput(self, 'KibanaAdmin', export_name='kibana-admin',
                       value=kibanaadmin)
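
The helpers referenced above (list_without_none, make_account_principals, make_resource_list) are defined elsewhere in this stack. A minimal sketch of what they might look like, assuming they only drop empty IDs, wrap account IDs in AccountPrincipal objects, and expand each ID into an S3 ARN pattern; the real implementations may differ:

    # Hypothetical sketches of the helper methods used above; the actual
    # implementations live elsewhere in this stack.
    def list_without_none(self, *args):
        # Flatten the arguments and drop None or empty entries
        keys = []
        for arg in args:
            if isinstance(arg, list):
                keys.extend([item for item in arg if item])
            elif arg:
                keys.append(arg)
        return keys

    def make_account_principals(self, *account_id_groups):
        # Wrap every account ID in an IAM AccountPrincipal
        return [aws_iam.AccountPrincipal(account_id)
                for account_id in self.list_without_none(*account_id_groups)]

    def make_resource_list(self, path='', tail='', keys=None):
        # Expand each key (account or organization ID) into an ARN pattern
        return [f'{path}{key}{tail}' for key in (keys or [])]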
Example #26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add a tag to all constructs in this stack
        core.Tags.of(self).add("Created By", "umut.yalcinkaya")

        # VPC
        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])
        # Iterate the private subnets
        # selection = vpc.select_subnets(
        #     subnet_type=ec2.SubnetType.PRIVATE
        # )

        # for subnet in selection.subnets:
        #     pass

        # AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role and SSM Managed Policy
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Instance
        instance = ec2.Instance(self,
                                "Instance",
                                instance_type=ec2.InstanceType("t3a.large"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=role,
                                key_name="umut-poc")

        instance.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])

        # Script in S3 as Asset
        asset = Asset(self,
                      "Asset",
                      path=os.path.join(dirname, "configure.sh"))
        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        # Userdata executes script from S3
        instance.user_data.add_execute_file_command(file_path=local_path)
        asset.grant_read(instance.role)
        # output information after deploy
        output = core.CfnOutput(self,
                                "BastionHost_information",
                                value=instance.instance_public_ip,
                                description="BastionHost's Public IP")
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 stage={},
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        customer = self.node.try_get_context("customer")
        stage_name = stage["stage_name"]
        vpc_cidr = stage["vpc_cidr"]
        vpc_prefix = stage["vpc_prefix"]
        prefix_name = f'{vpc_prefix}-{stage_name}-{customer}'
        subnet_prefix = int(stage['subnet_prefix'])
        max_azs = int(stage['max_azs'])
        nat_number = int(stage['nat_number'])
        layers = stage['layers']
        layer_endpoints = stage['layer_endpoints']
        layers_nat = stage['layer_nats']

        flag_public = False
        flag_private = False
        flag_isolated = False

        subnets_config = []
        for layer in layers:
            layer_type = layers[layer]
            if layer_type == 'PUBLIC':
                subnet_type = _ec2.SubnetType.PUBLIC
                flag_public = True
            if layer_type == 'PRIVATE':
                subnet_type = _ec2.SubnetType.PRIVATE
                flag_private = True
            if layer_type == 'ISOLATED':
                flag_isolated = True
                subnet_type = _ec2.SubnetType.ISOLATED
            subnets_config.append(
                _ec2.SubnetConfiguration(name=layer,
                                         subnet_type=subnet_type,
                                         cidr_mask=subnet_prefix))

        nat_subnets = None
        if layers_nat in layers and layers[layers_nat] == 'PUBLIC':
            nat_subnets = _ec2.SubnetSelection(subnet_group_name=layers_nat)

        vpc_tenacy = _ec2.DefaultInstanceTenancy.DEFAULT
        if self.node.try_get_context("vpc_tenacy") == 'DEDICATED':
            vpc_tenacy = _ec2.DefaultInstanceTenancy.DEDICATED

        subnet_layer_endpoints = [
            _ec2.SubnetSelection(one_per_az=True,
                                 subnet_group_name=layer_endpoints)
        ]

        self.vpc = _ec2.Vpc(
            self,
            prefix_name,
            max_azs=max_azs,
            cidr=vpc_cidr,
            subnet_configuration=subnets_config,
            nat_gateway_subnets=nat_subnets,
            nat_gateways=nat_number,
            default_instance_tenancy=vpc_tenacy,
            gateway_endpoints={
                "S3":
                _ec2.GatewayVpcEndpointOptions(
                    service=_ec2.GatewayVpcEndpointAwsService.S3,
                    subnets=subnet_layer_endpoints)
            })

        # tagging
        core.Tags.of(self.vpc.node.default_child).add("Name",
                                                      f'{prefix_name}-vpc')
        core.Tags.of(self.vpc.node.find_child('IGW')).add(
            "Name", f'{prefix_name}-igw')

        prisub = [prs for prs in self.vpc.private_subnets]
        pubsub = [pus for pus in self.vpc.public_subnets]
        isosub = [ios for ios in self.vpc.isolated_subnets]

        count = 1
        for _ in range(nat_number):
            core.Tags.of(
                self.vpc.node.find_child('publicSubnet' + str(count)).node.
                find_child('NATGateway')).add("Name", f'{prefix_name}-nat')
            core.Tags.of(
                self.vpc.node.find_child(
                    'publicSubnet' + str(count)).node.find_child("EIP")).add(
                        "Name", f'{prefix_name}-public-eip-{count}')
            count += 1

        count = 1
        for prs in prisub:
            az_end = prs.availability_zone[-2:]
            core.Tags.of(prs.node.default_child).add(
                "Name", f'{prefix_name}-private-{az_end}')
            core.Tags.of(
                self.vpc.node.find_child(
                    'privateSubnet' +
                    str(count)).node.find_child('RouteTable')).add(
                        "Name", f'{prefix_name}-private-rt-{az_end}')
            count += 1

        count = 1
        for pus in pubsub:
            az_end = pus.availability_zone[-2:]
            core.Tags.of(pus.node.default_child).add(
                "Name", f'{prefix_name}-public-{az_end}')
            core.Tags.of(
                self.vpc.node.find_child(
                    'publicSubnet' +
                    str(count)).node.find_child('RouteTable')).add(
                        "Name", f'{prefix_name}-public-rt-{az_end}')
            count += 1

        count = 1
        for ios in isosub:
            az_end = ios.availability_zone[-2:]
            core.Tags.of(ios.node.default_child).add(
                "Name", f'{prefix_name}-database-{az_end}')
            core.Tags.of(
                self.vpc.node.find_child(
                    'databaseSubnet' +
                    str(count)).node.find_child('RouteTable')).add(
                        "Name", f'{prefix_name}-database-rt-{az_end}')
            count += 1

        core.CfnOutput(self, "Output", value=self.vpc.vpc_id)
 def __init__(self, scope: core.Construct, id: str,UserName="******",EmailAddress="default",**kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     
     # vpc with one public subnet and one private subnet
     self.My_Vpc = _ec2.Vpc(self, "vpc-"+ UserName + "-batch",
         max_azs=2,
         nat_gateways=1,
         subnet_configuration=[
             _ec2.SubnetConfiguration(
                 subnet_type=_ec2.SubnetType.PUBLIC,
                 name="BatchENV",
                 cidr_mask=24
             ),
             _ec2.SubnetConfiguration(
                 cidr_mask=24,
                 name="InternalENV",
                 subnet_type=_ec2.SubnetType.PRIVATE
             )
         ]
     )
     
     # Definition Of S3 Bucket For Batch Computing
     self.My_S3_Bucket = _s3.Bucket(self,
         "s3bucket-" + UserName + "-batch",
         lifecycle_rules=[
             _s3.LifecycleRule(
                 # delete the files after 365 days (1 year)
                 expiration=core.Duration.days(365),
                 transitions=[
                     # move files into Glacier after 30 days
                     _s3.Transition(
                         transition_after=core.Duration.days(30),
                         storage_class=_s3.StorageClass.GLACIER
                     ),
                     _s3.Transition(
                         transition_after=core.Duration.days(120),
                         storage_class=_s3.StorageClass.DEEP_ARCHIVE
                     )
                 ],
             )
         ],
         removal_policy=core.RemovalPolicy.DESTROY
     )
     
     # Definition Of ECR Repo
     self.My_ECR_Repo = EcrENV(self,
         "ecr-" + UserName + "-batch",
         UserName=UserName
     )
     
     # Definition Of Batch ENV For Batch Computing
     self.My_Batch = BatchENV(self,
         "env-" + UserName + "-batch",
         CurrentVPC=self.My_Vpc,
         TargetS3=self.My_S3_Bucket,
         UserName=UserName
         
     )
     
     # Definition Of Batch Job 
     self.My_Batch_Task = BatchTASK(self,
         "task-" + UserName + "-batch",
         EcrRepo=self.My_ECR_Repo,
         UserName=UserName
     )
     
     # Definition Of Lambda Job 
     self.My_Lambda_Task = LambdaTask(self,
         "task-" + UserName + "-lambda",
         TargetS3=self.My_S3_Bucket
     )
     
     # Definition Of SNS Topic With Subscription 
     self.My_SNS = SnsENV(self,
         "sns-" + UserName + "-sfn",
         UserName=UserName,
         EmailAddress=EmailAddress
     )
     
     # Definition Of State Machine In Step functions  
     self.My_SFN = StepfunctionsENV(self,
         "statemachine-" + UserName + "-sfn",
         QueueDefine = self.My_Batch,
         TaskDefine = self.My_Batch_Task,
         LambdaDefine = self.My_Lambda_Task,
         SNSDefine = self.My_SNS
     )
     
     core.CfnOutput(self,
         "S3 Bucket For AWS Batch",
         value = self.My_S3_Bucket.bucket_name
     )
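
EcrENV, BatchENV, BatchTASK, LambdaTask, SnsENV and StepfunctionsENV are custom constructs defined elsewhere in this project. A minimal sketch of what the SnsENV wrapper might look like, assuming it only creates a topic plus an e-mail subscription; the imports, names and parameters below are illustrative, not the author's implementation:

# Hypothetical sketch of the SnsENV wrapper used above
from aws_cdk import core
from aws_cdk import aws_sns as _sns
from aws_cdk import aws_sns_subscriptions as _sns_subs

class SnsENV(core.Construct):
    def __init__(self, scope: core.Construct, id: str,
                 UserName="default", EmailAddress="default", **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # SNS topic that the Step Functions state machine publishes to
        self.My_SNS_Topic = _sns.Topic(
            self, "sns-" + UserName + "-topic",
            display_name="Batch pipeline notifications for " + UserName)
        self.My_SNS_Topic.add_subscription(
            _sns_subs.EmailSubscription(EmailAddress))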
Example #29
 def define_isolated_subnet(self):
     return ec2.SubnetConfiguration(
         cidr_mask=24,
         name="shared_isolated",
         subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
     )
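
By itself this helper only returns a SubnetConfiguration. A minimal usage sketch, assuming it lives on a construct that also builds the VPC; the method and construct id below are illustrative:

 def define_vpc(self):
     # Hypothetical companion method showing how the subnet configuration
     # returned above would be consumed
     return ec2.Vpc(
         self,
         "SharedVpc",
         max_azs=2,
         subnet_configuration=[self.define_isolated_subnet()],
     )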
Example #30
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(
            self,
            "AWS-Cookbook-Recipe-405",
            removal_policy=RemovalPolicy.DESTROY
        )

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False
        )

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24
        )

        # create VPC
        vpc = ec2.Vpc(
            self,
            'AWS-Cookbook-VPC',
            cidr='10.10.0.0/23',
            subnet_configuration=[isolated_subnets]
        )

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('secretsmanager'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            )
        )

        rds_security_group = ec2.SecurityGroup(
            self,
            'rds_security_group',
            description='Security Group for the RDS Instance',
            allow_all_outbound=True,
            vpc=vpc
        )

        db_name = 'AWSCookbookRecipe405'

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_23
            ),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name=db_name,
            instance_identifier='awscookbook405db',
            delete_automated_backups=True,
            deletion_protection=False,
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group,
            security_groups=[rds_security_group]
        )

        # mkdir -p lambda-layers/pymysql/python
        # cd lambda-layers/pymysql/python
        # pip install pymysql --target="."
        # cd ../../../

        # create Lambda Layer (pymysql) for use by the RDS secret rotation function
        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT"
        )
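
        # Sketch (not part of the recipe): a throwaway function that attaches
        # the pymysql layer created above; the inline handler and construct id
        # are illustrative assumptions.
        aws_lambda.Function(
            self,
            'PyMysqlLayerDemo',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='index.handler',
            code=aws_lambda.Code.from_inline(
                'import pymysql\n'
                'def handler(event, context):\n'
                '    return pymysql.VERSION\n'),
            layers=[pymysql],
        )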

        # -------- Begin EC2 Helper ---------
        vpc.add_interface_endpoint(
            'VPCSSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCEC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCSSMMessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )

        iam_role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2RoleforSSM"))

        instance = ec2.Instance(
            self,
            "Instance",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc,
        )

        CfnOutput(
            self,
            'InstanceId',
            value=instance.instance_id
        )
        # -------- End EC2 Helper ---------

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("SecretsManagerReadWrite"))

        rds_instance.connections.allow_from(
            instance.connections, ec2.Port.tcp(3306), "Ingress")

        # outputs

        CfnOutput(
            self,
            'VpcId',
            value=vpc.vpc_id
        )

        CfnOutput(
            self,
            'PyMysqlLambdaLayerArn',
            value=pymysql.layer_version_arn
        )

        CfnOutput(
            self,
            'RdsDatabaseId',
            value=rds_instance.instance_identifier
        )

        CfnOutput(
            self,
            'RdsSecurityGroup',
            value=rds_security_group.security_group_id
        )

        CfnOutput(
            self,
            'RdsEndpoint',
            value=rds_instance.db_instance_endpoint_address
        )

        isolated_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)

        CfnOutput(
            self,
            'IsolatedSubnets',
            value=', '.join(map(str, isolated_subnets.subnet_ids))
        )

        CfnOutput(
            self,
            'DbName',
            value=db_name
        )

        CfnOutput(
            self,
            'RdsSecretArn',
            value=rds_instance.secret.secret_full_arn
        )
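
Since the EC2 helper instance carries the SecretsManagerReadWrite managed policy, the generated RDS credentials can be read from it directly. A minimal boto3 sketch of doing so, run on the instance; the SecretId placeholder stands for the RdsSecretArn output above:

import json

import boto3

# Read the admin credentials that RDS generated for the database
secrets_client = boto3.client('secretsmanager')
response = secrets_client.get_secret_value(
    SecretId='<value of the RdsSecretArn output>')
credentials = json.loads(response['SecretString'])

# The RDS-managed secret stores the connection details as JSON
print(credentials['username'], credentials['host'], credentials['dbname'])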