def __init__(self, scope: cdk.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)
        bucket_name_param = CfnParameter(self, "BucketName")
        self.template_options.metadata = {
            'AWS::CloudFormation::Interface': {
                'ParameterGroups': [{
                    'Label': {
                        'default': 'Bucket Configuration'
                    },
                    'Parameters': [bucket_name_param.logical_id]
                }],
                'ParameterLabels': {
                    bucket_name_param.logical_id: {
                        'default': 'What name should the bucket have?'
                    }
                }
            }
        }

        bucket = Bucket(self,
                        'test-bucket',
                        bucket_name=bucket_name_param.value_as_string)

        CfnOutput(self,
                  'S3Id',
                  value=bucket.bucket_arn,
                  export_name=Fn.sub('${AWS::StackName}-S3Id'))
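
The export_name above makes the bucket ARN importable by other stacks in the same account and region. A minimal sketch of a consumer, assuming the producing stack was deployed as "MyStack" (the name is a placeholder):

# Hypothetical consumer: resolves the ARN exported above at deploy time.
imported_arn = Fn.import_value('MyStack-S3Id')  # 'MyStack' is a placeholder
imported_bucket = Bucket.from_bucket_arn(self, 'ImportedBucket', imported_arn)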
    def _add_cfn_parameters(self):
        if (self.config.dev_settings and self.config.dev_settings.cookbook
                and self.config.dev_settings.cookbook.chef_cookbook):
            dev_settings_cookbook_value = self.config.dev_settings.cookbook.chef_cookbook
            custom_chef_cookbook = (
                create_s3_presigned_url(dev_settings_cookbook_value)
                if dev_settings_cookbook_value.startswith("s3://") else
                dev_settings_cookbook_value)
        else:
            custom_chef_cookbook = ""

        CfnParameter(
            self,
            "CfnParamCookbookVersion",
            type="String",
            default=utils.get_installed_version(),
            description="CookbookVersion",
        )
        CfnParameter(self,
                     "CfnParamChefCookbook",
                     type="String",
                     default=custom_chef_cookbook,
                     description="ChefCookbook")
        CfnParameter(self,
                     "CfnParamCincInstaller",
                     type="String",
                     default="",
                     description="CincInstaller")
        CfnParameter(
            self,
            "CfnParamChefDnaJson",
            type="String",
            default=ImageBuilderExtraChefAttributes(
                self.config.dev_settings).dump_json(),
            description="ChefAttributes",
        )
        CfnParameter(
            self,
            "CfnParamUpdateOsAndReboot",
            type="String",
            default="true"
            if self.config.build and self.config.build.update_os_packages
            and self.config.build.update_os_packages.enabled else "false",
            description="UpdateOsAndReboot",
        )
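
create_s3_presigned_url is referenced above but not defined in this excerpt. A minimal sketch of what it might do with boto3, assuming an s3://bucket/key URI and a one-hour expiry:

import boto3

def create_s3_presigned_url(s3_uri: str, expires_in: int = 3600) -> str:
    # Assumed behavior: split "s3://bucket/key" and presign a GET for it.
    bucket, _, key = s3_uri[len("s3://"):].partition("/")
    return boto3.client("s3").generate_presigned_url(
        "get_object",
        Params={"Bucket": bucket, "Key": key},
        ExpiresIn=expires_in,
    )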
    def __init__(self, scope: core.Construct, id: str) -> None:
        super().__init__(scope, id)
        
        client_id = os.getenv('GOOGLE_CLIENT_ID')
        client_secret = os.getenv('GOOGLE_CLIENT_SECRET')
        if client_id is None or client_secret is None:
            print('Missing GOOGLE_CLIENT_ID and/or GOOGLE_CLIENT_SECRET environment variables; please add them to the .env file')
            raise SystemExit(1)

        google_client_id = CfnParameter(scope, 'GoogleClientId', no_echo=True, default=client_id)
        google_client_secret = CfnParameter(scope, 'GoogleClientSecret', no_echo=True, default=client_secret)

        self.user_pool = UserPool(
            self,
            "UsersPool",
            sign_in_aliases=aws_cognito.SignInAliases(username=True),
            custom_attributes={"user_type": StringAttribute(max_len=256, mutable=True)})
        cfn_user_pool: CfnUserPool = self.user_pool.node.default_child
        cfn_user_pool.policies = CfnUserPool.PoliciesProperty(
            password_policy=CfnUserPool.PasswordPolicyProperty(
                minimum_length=8,
                require_lowercase=False,
                require_numbers=False,
                require_symbols=False,
                require_uppercase=False))
        
        self.user_pool.add_domain("JobliUserPoolDomain", cognito_domain=CognitoDomainOptions(domain_prefix=get_stack_name().lower()))
        user_pool_output = core.CfnOutput(self, id="JobliUserPoolID", value=self.user_pool.user_pool_id)
        user_pool_output.override_logical_id("JobliUserPoolID")
        user_pool_arn_output = core.CfnOutput(self, id="JobliUserPoolArn", value=self.user_pool.user_pool_arn)
        user_pool_arn_output.override_logical_id("JobliUserPoolArn")

        self.user_pool_identity_provider = UserPoolIdentityProviderGoogle(
            self,
            "JobliGoogleIdentityProvider",
            client_id=google_client_id.value_as_string,
            client_secret=google_client_secret.value_as_string,
            scopes=['profile', 'email', 'openid', 'phone'],
            user_pool=self.user_pool,
            attribute_mapping=AttributeMapping(email=ProviderAttribute.GOOGLE_EMAIL))
        self.user_pool_client = UserPoolClient(
            self,
            "PoolClient",
            user_pool=self.user_pool,
            auth_flows=AuthFlow(admin_user_password=True, user_password=True),
            o_auth=OAuthSettings(
                callback_urls=['jobli://', 'exp://127.0.0.1:19000/--/', 'http://localhost:19006/'],
                flows=OAuthFlows(authorization_code_grant=True, implicit_code_grant=True),
                scopes=[OAuthScope.PHONE, OAuthScope.EMAIL, OAuthScope.OPENID, OAuthScope.COGNITO_ADMIN, OAuthScope.PROFILE]),
            supported_identity_providers=[UserPoolClientIdentityProvider.GOOGLE])
        
        self.user_pool_client.node.add_dependency(self.user_pool_identity_provider)

        auth_client_output = core.CfnOutput(self, id="AuthClientID", value=self.user_pool_client.user_pool_client_id)
        auth_client_output.override_logical_id("AuthClientID")
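
Because the client enables the admin user-password flow, a quick post-deployment smoke test is boto3's admin_initiate_auth. The pool and client IDs come from the JobliUserPoolID and AuthClientID outputs above; the credentials below are placeholders:

import boto3

cognito = boto3.client("cognito-idp")
resp = cognito.admin_initiate_auth(
    UserPoolId="<JobliUserPoolID output>",  # placeholder
    ClientId="<AuthClientID output>",       # placeholder
    AuthFlow="ADMIN_USER_PASSWORD_AUTH",
    AuthParameters={"USERNAME": "test-user", "PASSWORD": "<password>"},
)
print(resp["AuthenticationResult"]["AccessToken"])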
    def _add_parameters(self):
        if self._condition_custom_cluster_dns():
            domain_name = AWSApi.instance().route53.get_hosted_zone_domain_name(
                self.config.scheduling.settings.dns.hosted_zone_id)
        else:
            domain_name = "pcluster."
        cluster_dns_domain = f"{self.stack_name}.{domain_name}"

        self.cluster_dns_domain = CfnParameter(
            self.stack_scope,
            "ClusterDNSDomain",
            description=
            "DNS Domain of the private hosted zone created within the cluster",
            default=cluster_dns_domain,
        )
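
get_hosted_zone_domain_name wraps a Route 53 lookup behind the project-specific AWSApi facade. A plausible sketch with boto3 (the wrapper itself is not shown in this excerpt):

import boto3

def get_hosted_zone_domain_name(hosted_zone_id: str) -> str:
    # GetHostedZone returns the zone name with a trailing dot,
    # e.g. "example.com.", which matches the "pcluster." fallback above.
    zone = boto3.client("route53").get_hosted_zone(Id=hosted_zone_id)
    return zone["HostedZone"]["Name"]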
Example no. 5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        environment = CfnParameter(self,
                                   "Environment",
                                   type="String",
                                   default="dev",
                                   description="environment name")

        parameters = {'environment': environment.value_as_string}

        fruit = Fruit(self, parameters)
        fruit.function()

        veggies = Veggies(self, parameters)
        veggies.function()
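
Fruit and Veggies are project constructs that this excerpt does not define. Judging from the call pattern, their shape is roughly the following (entirely hypothetical):

class Fruit:
    """Hypothetical: receives the stack and the shared parameter dict."""

    def __init__(self, scope, parameters: dict):
        self._scope = scope
        self._environment = parameters['environment']

    def function(self) -> None:
        # Presumably defines the construct's resources, scoped by environment.
        pass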
Example no. 6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        env = "dev"
        project = "testproject1"
        service = "etl"

        component = "workflow"
        Tag.add(self, "Service", service)
        Tag.add(self, "Component", component)

        param_dwh = CfnParameter(
            self,
            "ParamDWH",
            type="String",
            description="The domain of the DWH to connect to. | team=data,service=dwh",
            default="fakedwh.host",
        )

        value_raw = "import: ${value_to_import}, param: ${param_dwh}"
        value = Fn.sub(
            value_raw,
            {
                "value_to_import": Fn.import_value(
                    generate_resource_name(project, env, service, "buckets", "bucketb")
                ),
                "param_dwh": Fn.ref(param_dwh.logical_id),
            },
        )
        aws_ssm.StringParameter(
            self,
            "SSMParam",
            parameter_name=generate_resource_name(
                project, env, service, component, "ssmparam"
            ),
            string_value=value,
        )
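
After deployment, the SSM parameter holds the fully substituted string. Reading it back with boto3 (the exact name depends on generate_resource_name, which is not shown):

import boto3

ssm = boto3.client("ssm")
# The name must match what generate_resource_name(...) produced at synth time.
value = ssm.get_parameter(Name="<generated-ssmparam-name>")["Parameter"]["Value"]
print(value)  # e.g. "import: <imported bucket value>, param: fakedwh.host"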
Example no. 7
#!/usr/bin/env python3
from aws_cdk.core import App, Stack, CfnParameter
from stack.base_stack import BaseStack
from stack.core_stack import CoreStack

app = App()

main_stack = Stack(app, 'Main')

email_param = CfnParameter(
    main_stack, 'email',
    description='email for sns subscription').value_as_string
app_stack = CoreStack(main_stack, 'AppStack', email=email_param)
base_stack = BaseStack(main_stack, 'BaseStack', app_stack.functions.my_lambda,
                       app_stack.functions.custom_config_rds,
                       app_stack.step_fn.state_machine)

#CdkworkshopStack(app, "projetox", env={'region': 'sa-east-1', 'account': os.environ['CDK_DEFAULT_ACCOUNT']})

app.synth()
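
Note that the email parameter has no default, so a value must be supplied at deploy time, e.g. cdk deploy Main --parameters email=ops@example.com (the address is a placeholder).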
Example no. 8
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # The code that defines your stack goes here

        vpc = ec2.Vpc(
            self,
            "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # This configuration creates 3 subnet groups across 2 AZs = 6 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="DB",
                                        cidr_mask=24)
            ],
            nat_gateways=2)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string
        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        user_defined_tags = self.node.try_get_context("tags")

        if user_defined_tags:
            # Expects context of the form: -c tags='Key Value'
            tags = user_defined_tags.split(' ')
            core.Tags.of(asg).add(*tags)

        # An NLB cannot be associated with a security group, so the NLB object
        # has no Connections object. The inbound rules of the newly created ASG
        # security group must be modified manually to allow access from the NLB
        # IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(18083),
                                            "Allow NLB access WEB UI")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")

        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")
        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        # @todo we need SSL termination
        # listenerUI.add_targets("addTargetGroup",
        #     port=18083,
        #     targets=[asg])
        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
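
linux_ami and user_data are referenced above but defined outside this excerpt. Plausible definitions in CDK v1 style (the user-data body is a placeholder):

linux_ami = ec2.AmazonLinuxImage(
    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
user_data = """#!/bin/bash
# placeholder: install and start the EMQ X broker here
"""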
Example no. 9
    def __init__(self, scope: core.Construct, construct_id: str, env, **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)
        
        # The code that defines your stack goes here
        if self.node.try_get_context("tags"):
            self.user_defined_tags = self.node.try_get_context("tags").split(' ')
        else:
            self.user_defined_tags = None

        vpc = ec2.Vpc(self, "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # This configuration creates 3 subnet groups across 2 AZs = 6 subnets.
            subnet_configuration=[ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PUBLIC,
                name="Public",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PRIVATE,
                name="Private",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.ISOLATED,
                name="DB",
                cidr_mask=24
            )
            ],
            nat_gateways=2
            )
        self.vpc = vpc


        # Route53
        int_zone = r53.PrivateHostedZone(self, r53_zone_name,
                                         zone_name='int.emqx',
                                         vpc=vpc)

        self.int_zone = int_zone

        # Define cfn parameters
        # ec2_type = CfnParameter(self, "ec2-instance-type",
        #     type="String", default="m5.2xlarge",
        #     description="Specify the instance type you want").value_as_string
        
        key_name = CfnParameter(self, "ssh key",
            type="String", default="key_ireland",
            description="Specify your SSH key").value_as_string

        sg = ec2.SecurityGroup(self, 'sg_int', vpc=vpc)
        self.sg = sg

        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH from anywhere')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(1883), 'MQTT TCP Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8883), 'MQTT TCP/TLS Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(14567), 'MQTT Quic Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(18083), 'WEB UI')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4369), 'EMQX dist port 1')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4370), 'EMQX dist port 2')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8081), 'EMQX dashboard')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2379), 'etcd client port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2380), 'etcd peer port')

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(self, "Bastion",
                                       vpc=vpc,
                                       subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                       instance_name="BastionHostLinux",
                                       instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")
    
        # Create NLB
        nlb = elb.NetworkLoadBalancer(self, "emq-elb",
                                      vpc=vpc,
                                      internet_facing=False, 
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        r53.ARecord(self, "AliasRecord",
                    zone=int_zone,
                    record_name=loadbalancer_dnsname,
                    target=r53.RecordTarget.from_alias(r53_targets.LoadBalancerTarget(nlb)))

        self.nlb = nlb

        listener = nlb.add_listener("port1883", port=1883)
        listenerTLS = nlb.add_listener("port8883", port=8883)  # TLS, EMQX termination
        listenerQuic = nlb.add_listener("port14567", port=14567, protocol=elbv2.Protocol.UDP)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        # asg = autoscaling.AutoScalingGroup(self, "emq-asg",
        #                                    vpc=vpc,
        #                                    vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        #                                    instance_type=ec2.InstanceType(
        #                                        instance_type_identifier=ec2_type),
        #                                    machine_image=linux_ami,
        #                                    security_group = sg,
        #                                    key_name=key_name,
        #                                    user_data=ec2.UserData.custom(user_data),
        #                                    health_check=HealthCheck.elb(grace=Duration.seconds(60)),
        #                                    desired_capacity=3,
        #                                    min_capacity=2,
        #                                    max_capacity=4
        # )

        # if self.user_defined_tags:
        #     core.Tags.of(asg).add(*self.user_defined_tags)

        # # An NLB cannot be associated with a security group, so the NLB object
        # # has no Connections object. The ASG security group's inbound rules must
        # # be modified manually to allow access from the NLB IPs only.
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(1883), "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(18083), "Allow NLB access WEB UI")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(8081), "Allow emqx cluster dashboard access")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from(bastion,
        #     ec2.Port.tcp(22), "Allow SSH from the bastion only")

        self.setup_emqx(numEmqx, vpc, int_zone, sg, key_name)

        listener.add_targets('ec2',
                             port=1883,
                             targets=[target.InstanceTarget(x)
                                      for x in self.emqx_vms])
        # @todo we need SSL termination
        listenerUI.add_targets('ec2',
                               port=18083,
                               targets=[target.InstanceTarget(x)
                                        for x in self.emqx_vms])

        listenerQuic.add_targets('ec2',
                                 port=14567,
                                 protocol=elbv2.Protocol.UDP,
                                 targets=[target.InstanceTarget(x)
                                          for x in self.emqx_vms])

        listenerTLS.add_targets('ec2',
                                port=8883,
                                targets=[target.InstanceTarget(x)
                                         for x in self.emqx_vms])

        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        #self.setup_monitoring()

        self.setup_etcd(vpc, int_zone, sg, key_name)
        self.setup_loadgen(numLg, vpc, int_zone, sg, key_name, nlb.load_balancer_dns_name)

        self.setup_monitoring()

        core.CfnOutput(self, "Output",
            value=nlb.load_balancer_dns_name)
        core.CfnOutput(self, "SSH Entrypoint",
                       value=bastion.instance_public_ip)
        core.CfnOutput(self, "SSH cmds",
                       value="ssh -A -l ec2-user %s -L8888:%s:80 -L 9999:%s:80 -L 13000:%s:3000"
                       % (bastion.instance_public_ip, nlb.load_balancer_dns_name, self.mon_lb, self.mon_lb)
        )
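
This snippet relies on several aliased imports that the excerpt omits. For CDK v1 they were likely along these lines (note that elb and elbv2 would alias the same module):

from aws_cdk import core
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as elb
from aws_cdk import aws_elasticloadbalancingv2 as elbv2
from aws_cdk import aws_elasticloadbalancingv2_targets as target
from aws_cdk import aws_route53 as r53
from aws_cdk import aws_route53_targets as r53_targets
from aws_cdk.core import CfnParameter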
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        self.template_options.description = "(SO0123) Improving Forecast Accuracy with Machine Learning %%VERSION%% - This solution provides a mechanism to automate Amazon Forecast predictor and forecast generation and visualize it via an Amazon SageMaker Jupyter Notebook"

        # set up the template parameters
        email = CfnParameter(
            self,
            id="Email",
            type="String",
            description="Email to notify with forecast results",
            default="",
            max_length=50,
            allowed_pattern=
            r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$|^$)",
            constraint_description="Must be a valid email address or blank",
        )

        lambda_log_level = CfnParameter(
            self,
            id="LambdaLogLevel",
            type="String",
            description="Change the verbosity of the logs output to CloudWatch",
            default="WARNING",
            allowed_values=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        )

        notebook_deploy = CfnParameter(
            self,
            id="NotebookDeploy",
            type="String",
            description="Deploy an Amazon SageMaker Jupyter Notebook instance",
            default="No",
            allowed_values=["Yes", "No"],
        )

        notebook_volume_size = CfnParameter(
            self,
            id="NotebookVolumeSize",
            type="Number",
            description=
            "Enter the size of the notebook instance EBS volume in GB",
            default=10,
            min_value=5,
            max_value=16384,
            constraint_description=
            "Must be an integer between 5 (GB) and 16384 (16 TB)",
        )

        notebook_instance_type = CfnParameter(
            self,
            id="NotebookInstanceType",
            type="String",
            description="Enter the type of the notebook instance",
            default="ml.t2.medium",
            allowed_values=[
                "ml.t2.medium",
                "ml.t3.medium",
                "ml.r5.large",
                "ml.c5.large",
            ],
        )

        quicksight_analysis_owner = CfnParameter(
            self,
            id="QuickSightAnalysisOwner",
            description=
            "With QuickSight Enterprise enabled, provide a QuickSight ADMIN user ARN to automatically create QuickSight analyses",
            default="",
            allowed_pattern="(^arn:.*:quicksight:.*:.*:user.*$|^$)",
        )

        # set up the metadata/ cloudformation interface
        template_options = TemplateOptions()
        template_options.add_parameter_group(
            label=
            "Improving Forecast Accuracy with Machine Learning Configuration",
            parameters=[email],
        )
        template_options.add_parameter_group(
            label="Visualization Options",
            parameters=[
                quicksight_analysis_owner,
                notebook_deploy,
                notebook_instance_type,
                notebook_volume_size,
            ],
        )
        template_options.add_parameter_group(label="Deployment Configuration",
                                             parameters=[lambda_log_level])
        template_options.add_parameter_label(email, "Email")
        template_options.add_parameter_label(lambda_log_level,
                                             "CloudWatch Log Level")
        template_options.add_parameter_label(notebook_deploy,
                                             "Deploy Jupyter Notebook")
        template_options.add_parameter_label(notebook_volume_size,
                                             "Jupyter Notebook volume size")
        template_options.add_parameter_label(notebook_instance_type,
                                             "Jupyter Notebook instance type")
        template_options.add_parameter_label(quicksight_analysis_owner,
                                             "Deploy QuickSight Dashboards")
        self.template_options.metadata = template_options.metadata

        solution_mapping = CfnMapping(
            self,
            "Solution",
            mapping={
                "Data": {
                    "ID": "SO0123",
                    "Version": "%%VERSION%%",
                    "SendAnonymousUsageData": "Yes",
                }
            },
        )

        source_mapping = CfnMapping(
            self,
            "SourceCode",
            mapping={
                "General": {
                    "S3Bucket": "%%BUCKET_NAME%%",
                    "KeyPrefix": "%%SOLUTION_NAME%%/%%VERSION%%",
                    "QuickSightSourceTemplateArn": "%%QUICKSIGHT_SOURCE%%",
                }
            },
        )

        # conditions
        create_notebook = CfnCondition(
            self,
            "CreateNotebook",
            expression=Fn.condition_equals(notebook_deploy, "Yes"),
        )
        email_provided = CfnCondition(
            self,
            "EmailProvided",
            expression=Fn.condition_not(Fn.condition_equals(email, "")),
        )
        send_anonymous_usage_data = CfnCondition(
            self,
            "SendAnonymousUsageData",
            expression=Fn.condition_equals(
                Fn.find_in_map("Solution", "Data", "SendAnonymousUsageData"),
                "Yes"),
        )
        create_analysis = CfnCondition(
            self,
            "CreateAnalysis",
            expression=Fn.condition_not(
                Fn.condition_equals(quicksight_analysis_owner, ""), ),
        )

        # Step function and state machine
        fns = LambdaFunctions(self, "Functions", log_level=lambda_log_level)

        # SNS
        notifications = Notifications(
            self,
            "NotificationConfiguration",
            lambda_function=fns.functions["SNS"],
            email=email,
            email_provided=email_provided,
        )

        # Custom Resources
        unique_name = CfnResource(
            self,
            "UniqueName",
            type="Custom::UniqueName",
            properties={
                "ServiceToken":
                fns.functions["CfnResourceUniqueName"].function_arn
            },
        )
        unique_name.override_logical_id("UniqueName")

        data_bucket_name_resource = CfnResource(
            self,
            "DataBucketName",
            type="Custom::BucketName",
            properties={
                "ServiceToken":
                fns.functions["CfnResourceBucketName"].function_arn,
                "BucketPurpose": "data-bucket",
                "StackName": Aws.STACK_NAME,
                "Id": unique_name.get_att("Id"),
            },
        )
        data_bucket_name_resource.override_logical_id("DataBucketName")

        # Buckets
        access_logs_bucket = self.secure_bucket(
            "AccessLogsBucket",
            suppressions=[
                CfnNagSuppression(
                    "W35",
                    "This bucket is used as the logging destination for forecast datasets and exports",
                )
            ],
            access_control=BucketAccessControl.LOG_DELIVERY_WRITE,
        )

        athena_bucket = self.secure_bucket(
            "AthenaBucket",
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="athena-bucket-access-logs/",
        )

        data_bucket = self.secure_bucket(
            "ForecastBucket",
            lifecycle_rules=[
                LifecycleRule(
                    abort_incomplete_multipart_upload_after=Duration.days(3),
                    enabled=True,
                ),
                LifecycleRule(expiration=Duration.days(1),
                              prefix="raw/",
                              enabled=True),
            ],
            bucket_name=data_bucket_name_resource.get_att("Name").to_string(),
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="forecast-bucket-access-logs/",
        )
        data_bucket.node.default_child.add_property_override(
            "NotificationConfiguration",
            {
                "LambdaConfigurations": [{
                    "Function":
                    fns.functions["S3NotificationLambda"].function_arn,
                    "Event":
                    "s3:ObjectCreated:*",
                    "Filter": {
                        "S3Key": {
                            "Rules": [
                                {
                                    "Name": "prefix",
                                    "Value": "train/"
                                },
                                {
                                    "Name": "suffix",
                                    "Value": ".csv"
                                },
                            ]
                        }
                    },
                }]
            },
        )

        # Glue and Athena
        glue = Glue(self, "GlueResources", unique_name)
        athena = Athena(self, "AthenaResources", athena_bucket=athena_bucket)

        # Configure permissions for functions
        fns.set_s3_notification_permissions(data_bucket_name_resource)
        fns.set_forecast_s3_access_permissions(
            name="DatasetImport",
            function=fns.functions["CreateDatasetImportJob"],
            data_bucket_name_resource=data_bucket_name_resource,
        )
        fns.set_forecast_s3_access_permissions(
            name="ForecastExport",
            function=fns.functions["CreateForecast"],
            data_bucket_name_resource=data_bucket_name_resource,
        )
        fns.set_forecast_etl_permissions(
            function=fns.functions["PrepareForecastExport"],
            database=glue.database,
            workgroup=athena.workgroup,
            quicksight_principal=quicksight_analysis_owner,
            quicksight_source=source_mapping,
            athena_bucket=athena_bucket,
            data_bucket_name_resource=data_bucket_name_resource,
        )
        fns.set_forecast_permissions(
            "CreateDatasetGroup",
            data_bucket_name_resource=data_bucket_name_resource)
        fns.set_forecast_permissions(
            "CreateDatasetImportJob",
            data_bucket_name_resource=data_bucket_name_resource,
        )
        fns.set_forecast_permissions(
            "CreateForecast",
            data_bucket_name_resource=data_bucket_name_resource)
        fns.set_forecast_permissions(
            "CreatePredictor",
            data_bucket_name_resource=data_bucket_name_resource)
        fns.set_forecast_permissions(
            "PrepareForecastExport",
            data_bucket_name_resource=data_bucket_name_resource)

        # notebook (conditional on 'create_notebook')
        notebook = Notebook(
            self,
            "Notebook",
            buckets=[data_bucket],
            instance_type=notebook_instance_type.value_as_string,
            instance_volume_size=notebook_volume_size.value_as_number,
            notebook_path=Path(__file__).parent.parent.parent.joinpath(
                "notebook", "samples", "notebooks"),
            notebook_destination_bucket=data_bucket,
            notebook_destination_prefix="notebooks",
        )
        Aspects.of(notebook).add(ConditionalResources(create_notebook))

        # solutions metrics (conditional on 'send_anonymous_usage_data')
        metrics = Metrics(
            self,
            "SolutionMetrics",
            metrics_function=fns.functions["CfnResourceSolutionMetrics"],
            metrics={
                "Solution":
                solution_mapping.find_in_map("Data", "ID"),
                "Version":
                solution_mapping.find_in_map("Data", "Version"),
                "Region":
                Aws.REGION,
                "NotebookDeployed":
                Fn.condition_if(create_notebook.node.id, "Yes", "No"),
                "NotebookType":
                Fn.condition_if(
                    create_notebook.node.id,
                    notebook_instance_type.value_as_string,
                    Aws.NO_VALUE,
                ),
                "QuickSightDeployed":
                Fn.condition_if(create_analysis.node.id, "Yes", "No"),
            },
        )
        Aspects.of(metrics).add(
            ConditionalResources(send_anonymous_usage_data))

        # outputs
        CfnOutput(self, "ForecastBucketName", value=data_bucket.bucket_name)
        CfnOutput(self, "AthenaBucketName", value=athena_bucket.bucket_name)
        CfnOutput(self,
                  "StepFunctionsName",
                  value=fns.state_machine.state_machine_name)
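
ConditionalResources is a helper from this solution rather than a CDK built-in. A minimal sketch of such an aspect, assuming it attaches a CfnCondition to every CloudFormation resource in a subtree (CDK v1 jsii-style aspect):

import jsii
from aws_cdk.core import CfnCondition, CfnResource, IAspect, IConstruct

@jsii.implements(IAspect)
class ConditionalResources:
    """Assumed behavior: gate all resources under a node on one condition."""

    def __init__(self, condition: CfnCondition):
        self.condition = condition

    def visit(self, node: IConstruct) -> None:
        if isinstance(node, CfnResource):
            node.cfn_options.condition = self.condition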
Example no. 11
    def __init__(self, scope: core.Construct, id: str, vpc, env,
                 **kwargs) -> None:
        super().__init__(scope, id, env=env, **kwargs)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        # An NLB cannot be associated with a security group, so the NLB object
        # has no Connections object. The inbound rules of the newly created ASG
        # security group must be modified manually to allow access from the NLB
        # IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")

        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")

        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
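
A recurring detail in these stacks: BastionHostLinux in CDK v1 exposed no SSH key property, so each example reaches through to the underlying CfnInstance with add_property_override("KeyName", ...) as an escape hatch, wiring the key-name parameter into the raw CloudFormation resource.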