Example 1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.platform_resources = ImportedResources(self, self.stack_name)

        ECS_APP_NAME = "octicketing-microservice"
        ECS_DEPLOYMENT_GROUP_NAME = "octicketingECSBlueGreen"
        ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes"
        ECS_DEPLOYMENT_CONFIG_ALL = "CodeDeployDefault.ECSAllAtOnce"
        ECS_TASKSET_TERMINATION_WAIT_TIME = 10
        ECS_TASK_FAMILY_NAME = "octicketing-service"
        ECS_APP_LOG_GROUP_NAME = "/ecs/" + ECS_TASK_FAMILY_NAME

        DUMMY_APP_NAME = "hello-world-microservice"
        DUMMY_TASK_FAMILY_NAME = "hello-world-service"
        DUMMY_APP_LOG_GROUP_NAME = "/ecs/dummy-" + DUMMY_TASK_FAMILY_NAME
        DUMMY_CONTAINER_IMAGE = self.account + ".dkr.ecr." + \
            self.region + ".amazonaws.com/hello-world:latest"
        # =============================================================================
        # ECR and CodeCommit repositories for the Blue/Green deployment
        # =============================================================================

        # ECR repository for the docker images
        self.octicketing_ecr_repo = aws_ecr.Repository(
            self,
            "OcticketingECRRepo",
            repository_name=ECS_APP_NAME,
            removal_policy=core.RemovalPolicy.DESTROY)

        self.octicketing_code_repo = aws_codecommit.Repository(
            self,
            ECS_APP_NAME + "-bg",
            repository_name=ECS_APP_NAME + "-bg",
            description=ECS_APP_NAME + " blue-green service repository")
        core.CfnOutput(self,
                       'BGRepoName',
                       value=self.octicketing_code_repo.repository_name,
                       export_name='OcticketingBGRepoName')
        core.CfnOutput(self,
                       'BGRepoARN',
                       value=self.octicketing_code_repo.repository_arn,
                       export_name='OcticketingBGRepoARN')

        # =============================================================================
        #   CODE BUILD and ECS TASK ROLES for the Blue/Green deployment
        # =============================================================================

        # IAM role for the Code Build project
        codeBuildServiceRole = aws_iam.Role(
            self,
            "codeBuildServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com'))
        inlinePolicyForCodeBuild = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:InitiateLayerUpload", "ecr:BatchGetImage",
                "ecr:GetDownloadUrlForLayer", "ecr:UploadLayerPart",
                "ecr:CompleteLayerUpload", "ecr:PutImage"
            ],
            resources=["*"])

        codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild)

        # ECS task role
        ecsTaskRole = aws_iam.Role(
            self,
            "ecsTaskRoleForWorkshop",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))
        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "SecretsManagerReadWrite"))

        # =============================================================================
        # CODE DEPLOY APPLICATION for the Blue/Green deployment
        # =============================================================================

        # Creating the code deploy application
        codeDeployApplication = codedeploy.EcsApplication(
            self, "OcticketingCodeDeploy")

        # Creating the code deploy service role
        codeDeployServiceRole = aws_iam.Role(
            self,
            "codeDeployServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com'))
        codeDeployServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"))

        # IAM role for custom lambda function
        customLambdaServiceRole = aws_iam.Role(
            self,
            "codeDeployCustomLambda",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        inlinePolicyForLambda = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codedeploy:List*",
                "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup",
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup"
            ],
            resources=["*"])

        customLambdaServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        customLambdaServiceRole.add_to_policy(inlinePolicyForLambda)

        # Custom resource to create the deployment group
        createDeploymentGroupLambda = aws_lambda.Function(
            self,
            'createDeploymentGroupLambda',
            code=aws_lambda.Code.from_asset("custom_resources"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='create_deployment_group.handler',
            role=customLambdaServiceRole,
            description="Custom resource to create deployment group",
            memory_size=128,
            timeout=core.Duration.seconds(60))

        # ================================================================================================
        # CloudWatch Alarms for 4XX errors
        # ================================================================================================
        blue4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": self.platform_resources.blue_target_full_name,
                "LoadBalancer": self.platform_resources.alb_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))

        self.blue_targetAlarm = aws_cloudwatch.Alarm(
            self,
            "blue4xxErrors",
            alarm_name="Blue_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Blue target group",
            metric=blue4xxMetric,
            threshold=1,
            evaluation_periods=1)

        green4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": self.platform_resources.green_target_full_name,
                "LoadBalancer": self.platform_resources.alb_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))
        self.green_targetAlarm = aws_cloudwatch.Alarm(
            self,
            "green4xxErrors",
            alarm_name="Green_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Green target group",
            metric=green4xxMetric,
            threshold=1,
            evaluation_periods=1)

        # ================================================================================================
        # DUMMY TASK DEFINITION for the initial service creation
        # The service must exist before the CodeDeploy Deployment Group can be created
        # ================================================================================================
        sampleTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "sampleTaskDefn",
            family=DUMMY_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        sampleContainerDefn = sampleTaskDefinition.add_container(
            "sampleAppContainer",
            image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "sampleAppLogGroup",
                log_group_name=DUMMY_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=DUMMY_APP_NAME),
            docker_labels={"name": DUMMY_APP_NAME})

        port_mapping = aws_ecs.PortMapping(container_port=8080,
                                           protocol=aws_ecs.Protocol.TCP)

        sampleContainerDefn.add_port_mappings(port_mapping)

        # ================================================================================================
        # ECS task definition using ECR image
        # Will be used by the CODE DEPLOY for Blue/Green deployment
        # ================================================================================================
        OcticketingTaskDef = aws_ecs.FargateTaskDefinition(
            self,
            "appTaskDefn",
            family=ECS_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        # =============================================================================
        octicketing_cont_def = OcticketingTaskDef.add_container(
            "OcticketingAppContainer",
            image=aws_ecs.ContainerImage.from_ecr_repository(
                self.octicketing_ecr_repo, "latest"),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "OcticketingAppLogGroup",
                log_group_name=ECS_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=ECS_APP_NAME),
            docker_labels={"name": ECS_APP_NAME})
        octicketing_cont_def.add_port_mappings(port_mapping)

        # =============================================================================
        # ECS SERVICE for the Blue/Green deployment
        # =============================================================================

        OcticketingAppService = aws_ecs.FargateService(
            self,
            "OcticketingAppService",
            cluster=self.platform_resources.ecs_cluster,
            task_definition=sampleTaskDefinition,
            health_check_grace_period=core.Duration.seconds(10),
            platform_version=aws_ecs.FargatePlatformVersion.VERSION1_4,
            desired_count=1,
            deployment_controller={
                "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY
            },
            service_name=ECS_APP_NAME)

        OcticketingAppService.connections.allow_from(
            self.platform_resources.alb, aws_ec2.Port.tcp(80))
        OcticketingAppService.connections.allow_from(
            self.platform_resources.alb, aws_ec2.Port.tcp(8080))
        OcticketingAppService.attach_to_application_target_group(
            self.platform_resources.blue_target)

        # =============================================================================
        # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/Green deployment
        # =============================================================================

        core.CustomResource(
            self,
            'customEcsDeploymentGroup',
            service_token=createDeploymentGroupLambda.function_arn,
            properties={
                "ApplicationName": codeDeployApplication.application_name,
                "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME,
                "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME,
                "ServiceRoleArn": codeDeployServiceRole.role_arn,
                "BlueTargetGroup": self.platform_resources.blue_target_name,
                "GreenTargetGroup": self.platform_resources.green_target_name,
                "ProdListenerArn":
                self.platform_resources.prod_listener.listener_arn,
                "TestListenerArn":
                self.platform_resources.test_listener.listener_arn,
                "EcsClusterName":
                self.platform_resources.ecs_cluster.cluster_name,
                "EcsServiceName": OcticketingAppService.service_name,
                "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME,
                "BlueGroupAlarm": self.blue_targetAlarm.alarm_name,
                "GreenGroupAlarm": self.green_targetAlarm.alarm_name,
            })

        ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
            self,
            "ecsDeploymentGroup",
            application=codeDeployApplication,
            deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME,
            deployment_config=codedeploy.EcsDeploymentConfig.
            from_ecs_deployment_config_name(self, "ecsDeploymentConfig",
                                            ECS_DEPLOYMENT_CONFIG_NAME))
        # =============================================================================
        # CODE BUILD PROJECT for the Blue/Green deployment
        # =============================================================================

        # Creating the code build project
        OcticketingAppcodebuild = aws_codebuild.Project(
            self,
            "OcticketingAppcodebuild",
            role=codeBuildServiceRole,
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0,
                compute_type=aws_codebuild.ComputeType.SMALL,
                privileged=True,
                environment_variables={
                    'REPOSITORY_URI': {
                        'value':
                        self.octicketing_ecr_repo.repository_uri,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_EXECUTION_ARN': {
                        'value':
                        ecsTaskRole.role_arn,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_FAMILY': {
                        'value':
                        ECS_TASK_FAMILY_NAME,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    }
                }),
            source=aws_codebuild.Source.code_commit(
                repository=self.octicketing_code_repo))

        # =============================================================================
        # CODE PIPELINE for Blue/Green ECS deployment
        # =============================================================================

        codePipelineServiceRole = aws_iam.Role(
            self,
            "codePipelineServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com'))

        inlinePolicyForCodePipeline = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codecommit:Get*",
                "codecommit:List*", "codecommit:GitPull",
                "codecommit:UploadArchive", "codecommit:CancelUploadArchive",
                "codebuild:BatchGetBuilds", "codebuild:StartBuild",
                "codedeploy:CreateDeployment", "codedeploy:Get*",
                "codedeploy:RegisterApplicationRevision", "s3:Get*",
                "s3:List*", "s3:PutObject"
            ],
            resources=["*"])

        codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline)

        sourceArtifact = codepipeline.Artifact('sourceArtifact')
        buildArtifact = codepipeline.Artifact('buildArtifact')

        # S3 bucket for storing the code pipeline artifacts
        OcticketingAppArtifactsBucket = s3.Bucket(
            self,
            "OcticketingAppArtifactsBucket",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # S3 bucket policy for the code pipeline artifacts
        denyUnEncryptedObjectUploads = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:PutObject"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[OcticketingAppArtifactsBucket.bucket_arn + "/*"],
            conditions={
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "aws:kms"
                }
            })

        denyInsecureConnections = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[OcticketingAppArtifactsBucket.bucket_arn + "/*"],
            conditions={"Bool": {
                "aws:SecureTransport": "false"
            }})

        OcticketingAppArtifactsBucket.add_to_resource_policy(
            denyUnEncryptedObjectUploads)
        OcticketingAppArtifactsBucket.add_to_resource_policy(
            denyInsecureConnections)

        # Code Pipeline - CloudWatch trigger event is created by CDK
        codepipeline.Pipeline(
            self,
            "ecsBlueGreen",
            role=codePipelineServiceRole,
            artifact_bucket=OcticketingAppArtifactsBucket,
            stages=[
                codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.CodeCommitSourceAction(
                            action_name='Source',
                            repository=self.octicketing_code_repo,
                            output=sourceArtifact,
                        )
                    ]),
                codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='Build',
                            project=OcticketingAppcodebuild,
                            input=sourceArtifact,
                            outputs=[buildArtifact])
                    ]),
                codepipeline.StageProps(
                    stage_name='Deploy',
                    actions=[
                        aws_codepipeline_actions.CodeDeployEcsDeployAction(
                            action_name='Deploy',
                            deployment_group=ecsDeploymentGroup,
                            app_spec_template_input=buildArtifact,
                            task_definition_template_input=buildArtifact,
                        )
                    ])
            ])

        # =============================================================================
        # Export the outputs
        # =============================================================================
        core.CfnOutput(
            self,
            "ecsBlueGreenCodeRepo",
            description="Demo app code commit repository",
            export_name="ecsBlueGreenDemoAppRepo",
            value=self.octicketing_code_repo.repository_clone_url_http)
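The custom_resources Lambda asset referenced above is not included in this example. As a rough sketch only — the property names mirror the CustomResource properties passed above, but the actual create_deployment_group.py in the repository may differ — the handler could look like this:

import json
import urllib.request

import boto3

CODEDEPLOY = boto3.client('codedeploy')


def respond(event, status, reason=''):
    # Custom resources invoked directly via a Lambda service token must reply
    # to the pre-signed ResponseURL that CloudFormation provides in the event.
    body = json.dumps({
        'Status': status,
        'Reason': reason,
        'PhysicalResourceId': event['ResourceProperties']['DeploymentGroupName'],
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
    }).encode()
    req = urllib.request.Request(event['ResponseURL'], data=body, method='PUT')
    urllib.request.urlopen(req)


def handler(event, context):
    props = event['ResourceProperties']
    try:
        if event['RequestType'] == 'Create':
            CODEDEPLOY.create_deployment_group(
                applicationName=props['ApplicationName'],
                deploymentGroupName=props['DeploymentGroupName'],
                deploymentConfigName=props['DeploymentConfigName'],
                serviceRoleArn=props['ServiceRoleArn'],
                deploymentStyle={
                    'deploymentType': 'BLUE_GREEN',
                    'deploymentOption': 'WITH_TRAFFIC_CONTROL',
                },
                blueGreenDeploymentConfiguration={
                    'terminateBlueInstancesOnDeploymentSuccess': {
                        'action': 'TERMINATE',
                        'terminationWaitTimeInMinutes': int(props['TerminationWaitTime']),
                    },
                    'deploymentReadyOption': {'actionOnTimeout': 'CONTINUE_DEPLOYMENT'},
                },
                alarmConfiguration={
                    'enabled': True,
                    'alarms': [{'name': props['BlueGroupAlarm']},
                               {'name': props['GreenGroupAlarm']}],
                },
                ecsServices=[{
                    'serviceName': props['EcsServiceName'],
                    'clusterName': props['EcsClusterName'],
                }],
                loadBalancerInfo={'targetGroupPairInfoList': [{
                    'targetGroups': [{'name': props['BlueTargetGroup']},
                                     {'name': props['GreenTargetGroup']}],
                    'prodTrafficRoute': {'listenerArns': [props['ProdListenerArn']]},
                    'testTrafficRoute': {'listenerArns': [props['TestListenerArn']]},
                }]},
            )
        elif event['RequestType'] == 'Delete':
            CODEDEPLOY.delete_deployment_group(
                applicationName=props['ApplicationName'],
                deploymentGroupName=props['DeploymentGroupName'])
        # Updates are ignored in this sketch; a real handler might call update_deployment_group.
        respond(event, 'SUCCESS')
    except Exception as exc:
        respond(event, 'FAILED', reason=str(exc))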
Example 2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Step Function Starts Here

        # The first thing we need to do is see if they are asking for pineapple on a pizza
        pineapple_check_lambda = _lambda.Function(
            self,
            "pineappleCheckLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="orderPizza.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        # Step Functions are built up of steps; we need to define our first step
        order_pizza = step_fn_tasks.LambdaInvoke(
            self,
            'Order Pizza Job',
            lambda_function=pineapple_check_lambda,
            input_path='$.flavour',
            result_path='$.pineappleAnalysis',
            payload_response_only=True)

        # Pizza Order failure step defined
        pineapple_detected = step_fn.Fail(self,
                                          'Sorry, We Dont add Pineapple',
                                          cause='They asked for Pineapple',
                                          error='Failed To Make Pizza')

        # If they didn't ask for pineapple, let's cook the pizza
        cook_pizza = step_fn.Succeed(self,
                                     'Lets make your pizza',
                                     output_path='$.pineappleAnalysis')

        # If they ask for a pizza with pineapple, fail. Otherwise cook the pizza
        definition = step_fn.Chain \
            .start(order_pizza) \
            .next(step_fn.Choice(self, 'With Pineapple?')
                  .when(step_fn.Condition.boolean_equals('$.pineappleAnalysis.containsPineapple', True),
                        pineapple_detected)
                  .otherwise(cook_pizza))

        state_machine = step_fn.StateMachine(
            self,
            'StateMachine',
            definition=definition,
            timeout=core.Duration.minutes(5),
            tracing_enabled=True,
            state_machine_type=step_fn.StateMachineType.EXPRESS)

        # HTTP API Definition

        # Give our gateway permission to start synchronous Step Function executions
        http_api_role = iam.Role(
            self,
            'HttpApiRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'),
            inline_policies={
                "AllowSFNExec":
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=["states:StartSyncExecution"],
                        effect=iam.Effect.ALLOW,
                        resources=[state_machine.state_machine_arn])
                ])
            })

        api = api_gw.HttpApi(self,
                             'the_state_machine_api',
                             create_default_stage=True)

        # create an AWS_PROXY integration between the HTTP API and our Step Function
        integ = api_gw.CfnIntegration(
            self,
            'Integ',
            api_id=api.http_api_id,
            integration_type='AWS_PROXY',
            connection_type='INTERNET',
            integration_subtype='StepFunctions-StartSyncExecution',
            credentials_arn=http_api_role.role_arn,
            request_parameters={
                "Input": "$request.body",
                "StateMachineArn": state_machine.state_machine_arn
            },
            payload_format_version="1.0",
            timeout_in_millis=10000)

        api_gw.CfnRoute(self,
                        'DefaultRoute',
                        api_id=api.http_api_id,
                        route_key=api_gw.HttpRouteKey.DEFAULT.key,
                        target="integrations/" + integ.ref)

        core.CfnOutput(self, 'HTTP API URL', value=api.url)
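For illustration, a hypothetical client call against the deployed API (substitute the stack's 'HTTP API URL' output); the flavour field is what the state machine's input_path selects:

import json
import urllib.request

API_URL = 'https://abc123.execute-api.us-east-1.amazonaws.com'  # hypothetical; use the 'HTTP API URL' output

payload = json.dumps({'flavour': 'pineapple'}).encode()
req = urllib.request.Request(API_URL, data=payload,
                             headers={'Content-Type': 'application/json'})
with urllib.request.urlopen(req) as resp:
    # StartSyncExecution returns the execution result inline;
    # a pineapple order comes back with status FAILED.
    print(json.loads(resp.read()))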
Example 3
    def __init__(self, scope: core.Construct, id: str, eksname: str,
                 solution_id: str, version: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.template_options.description = "(SO0141) SQL based ETL with Apache Spark on Amazon EKS. This solution provides a SQL based ETL option with an open-source declarative framework powered by Apache Spark."
        source_dir = os.path.split(os.environ['VIRTUAL_ENV'])[0] + '/source'

        # Cloudformation input params
        datalake_bucket = core.CfnParameter(
            self,
            "datalakebucket",
            type="String",
            description=
            "Your existing S3 bucket to be accessed by Jupyter Notebook and ETL job. Default: blank",
            default="")
        login_name = core.CfnParameter(
            self,
            "jhubuser",
            type="String",
            description="Your username login to jupyter hub",
            default="sparkoneks")

        # Auto-generate a user login in secrets manager
        key = kms.Key(self,
                      'KMSKey',
                      removal_policy=core.RemovalPolicy.DESTROY,
                      enable_key_rotation=True)
        key.add_alias("alias/secretsManager")
        jhub_secret = secmger.Secret(
            self,
            'jHubPwd',
            generate_secret_string=secmger.SecretStringGenerator(
                exclude_punctuation=True,
                secret_string_template=json.dumps(
                    {'username': login_name.value_as_string}),
                generate_string_key="password"),
            removal_policy=core.RemovalPolicy.DESTROY,
            encryption_key=key)

        # 1. a new bucket to store app code and logs
        self.app_s3 = S3AppCodeConst(self, 'appcode')

        # 2. push docker image to ECR via AWS CICD pipeline
        ecr_image = DockerPipelineConstruct(self, 'image',
                                            self.app_s3.artifact_bucket)
        ecr_image.node.add_dependency(self.app_s3)
        core.CfnOutput(self, 'IMAGE_URI', value=ecr_image.image_uri)

        # 3. EKS base infrastructure
        network_sg = NetworkSgConst(self, 'network-sg', eksname,
                                    self.app_s3.code_bucket)
        iam = IamConst(self, 'iam_roles', eksname)
        eks_cluster = EksConst(self, 'eks_cluster', eksname, network_sg.vpc,
                               iam.managed_node_role, iam.admin_role)
        EksSAConst(self, 'eks_sa', eks_cluster.my_cluster, jhub_secret)
        base_app = EksBaseAppConst(self, 'eks_base_app',
                                   eks_cluster.my_cluster)

        # 4. Spark app access control
        app_security = SparkOnEksSAConst(self, 'spark_service_account',
                                         eks_cluster.my_cluster,
                                         login_name.value_as_string,
                                         self.app_s3.code_bucket,
                                         datalake_bucket.value_as_string)
        app_security.node.add_dependency(base_app.secret_created)
        # 5. Install Arc Jupyter notebook in EKS
        jhub_install = eks_cluster.my_cluster.add_helm_chart(
            'JHubChart',
            chart='jupyterhub',
            repository='https://jupyterhub.github.io/helm-chart',
            release='jhub',
            version='0.11.1',
            namespace='jupyter',
            create_namespace=False,
            values=load_yaml_replace_var_local(
                source_dir + '/app_resources/jupyter-values.yaml',
                fields={
                    "{{codeBucket}}": self.app_s3.code_bucket,
                    "{{region}}": core.Aws.REGION
                }))
        jhub_install.node.add_dependency(app_security)
        # EKS get Jupyter login dynamically from secrets manager
        name_parts = core.Fn.split('-', jhub_secret.secret_name)
        name_no_suffix = core.Fn.join(
            '-',
            [core.Fn.select(0, name_parts),
             core.Fn.select(1, name_parts)])

        config_hub = eks.KubernetesManifest(
            self,
            'JHubConfig',
            cluster=eks_cluster.my_cluster,
            manifest=load_yaml_replace_var_local(
                source_dir + '/app_resources/jupyter-config.yaml',
                fields={
                    "{{MY_SA}}": app_security.jupyter_sa,
                    "{{REGION}}": core.Aws.REGION,
                    "{{SECRET_NAME}}": name_no_suffix
                },
                multi_resource=True))
        config_hub.node.add_dependency(jhub_install)

        # 6. Install ETL orchestrator - Argo in EKS
        # can be replaced by other workflow tool, eg. Airflow
        argo_install = eks_cluster.my_cluster.add_helm_chart(
            'ARGOChart',
            chart='argo-workflows',
            repository='https://argoproj.github.io/argo-helm',
            release='argo',
            version='0.1.4',
            namespace='argo',
            create_namespace=True,
            values=load_yaml_local(source_dir +
                                   '/app_resources/argo-values.yaml'))
        argo_install.node.add_dependency(config_hub)
        # Create argo workflow template for Spark with T-shirt size
        submit_tmpl = eks_cluster.my_cluster.add_manifest(
            'SubmitSparkWrktmpl',
            load_yaml_local(source_dir + '/app_resources/spark-template.yaml'))
        submit_tmpl.node.add_dependency(argo_install)

        # 7. (OPTIONAL) retrieve the ALB DNS name to enable CloudFront in the nested stack.
        # It is used to serve HTTPS requests with its default domain name.
        # We recommend issuing your own TLS certificate and deleting the CloudFront components.
        self._jhub_alb = eks.KubernetesObjectValue(
            self,
            'jhubALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='jupyterhub',
            object_namespace='jupyter',
            timeout=core.Duration.minutes(10))
        self._jhub_alb.node.add_dependency(config_hub)

        self._argo_alb = eks.KubernetesObjectValue(
            self,
            'argoALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='argo-argo-workflows-server',
            object_namespace='argo',
            timeout=core.Duration.minutes(10))
        self._argo_alb.node.add_dependency(argo_install)

        # 8. (OPTIONAL) Send solution metrics to AWS
        # turn it off from the CloudFormation mapping section if preferred.
        send_metrics = solution_metrics.SendAnonymousData(
            self,
            "SendMetrics",
            network_sg.vpc,
            self.app_s3.artifact_bucket,
            self.app_s3.s3_deploy_contrust,
            metrics={
                "Solution":
                solution_id,
                "Region":
                core.Aws.REGION,
                "SolutionVersion":
                version,
                "UUID":
                "MY_UUID",
                "UseDataLakeBucket":
                "True" if not datalake_bucket.value_as_string else "False",
                "UseAWSCICD":
                "True" if ecr_image.image_uri else "False",
                "NoAZs":
                len(network_sg.vpc.availability_zones)
            })
        send_metrics.node.add_dependency(self.app_s3.s3_deploy_contrust)

        # 9. (OPTIONAL) Override the cfn Nag rules for AWS Solution CICD deployment
        # remove the section if your CI/CD pipeline doesn't use the cfn_nag utility to validate the CFN.
        k8s_ctl_node = self.node.find_child(
            '@aws-cdk--aws-eks.KubectlProvider')
        cluster_resrc_node = self.node.find_child(
            '@aws-cdk--aws-eks.ClusterResourceProvider')
        scan.suppress_cfnnag_rule(
            'W12', 'by default the role has * resource',
            self.node.find_child('eks_cluster').node.find_child('EKS').node.
            default_child.node.find_child('CreationRole').node.find_child(
                'DefaultPolicy').node.default_child)
        scan.suppress_cfnnag_rule(
            'W11', 'by default the role has * resource',
            self.node.find_child(
                'Custom::AWSCDKOpenIdConnectProviderCustomResourceProvider').
            node.find_child('Role'))
        scan.suppress_lambda_cfnnag_rule(
            k8s_ctl_node.node.find_child('Handler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            k8s_ctl_node.node.find_child('Provider').node.find_child(
                'framework-onEvent').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'Custom::CDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756C').
            node.default_child)
        # scan.suppress_lambda_cfnnag_rule(self.node.find_child('Custom::S3AutoDeleteObjectsCustomResourceProvider').node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'Custom::AWSCDKOpenIdConnectProviderCustomResourceProvider').
            node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child('AWSCDKCfnUtilsProviderCustomResourceProvider'
                                 ).node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child(
                'OnEventHandler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child(
                'IsCompleteHandler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-isComplete').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-onTimeout').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-onEvent').node.default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child('eks_cluster').node.find_child('EKS').node.
            find_child('ControlPlaneSecurityGroup').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child('SendMetrics').node.find_child(
                'LambdaProvider').node.find_child(
                    'framework-onEvent').node.default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child('SendMetrics').node.find_child(
                'LambdaProvider').node.find_child('framework-onEvent').node.
            find_child('SecurityGroup').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'SingletonLambda75248a819138468c9ba1bca6c7137599').node.
            default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child(
                'SingletonLambda75248a819138468c9ba1bca6c7137599').node.
            find_child('SecurityGroup').node.default_child)
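The helpers load_yaml_local and load_yaml_replace_var_local come from the solution's utility module and are not shown here; a minimal sketch of what they might do, assuming PyYAML:

import yaml


def load_yaml_local(yaml_file, multi_resource=False):
    # Parse a YAML file; multi-document files (e.g. Kubernetes manifests) return a list.
    with open(yaml_file) as f:
        if multi_resource:
            return list(yaml.safe_load_all(f))
        return yaml.safe_load(f)


def load_yaml_replace_var_local(yaml_file, fields, multi_resource=False):
    # Substitute {{placeholder}} tokens with their values before parsing,
    # matching the fields dicts passed in by the stack above.
    with open(yaml_file) as f:
        body = f.read()
    for key, value in fields.items():
        body = body.replace(key, str(value))
    if multi_resource:
        return list(yaml.safe_load_all(body))
    return yaml.safe_load(body)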
Example 4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # TODO: Create 2 S3 buckets:
        # RemovalPolicy should be left to the default RETAIN in production to
        # avoid any data loss. S3 buckets should be manually deleted.

        data_bucket = s3.Bucket(
            self, "-data-", removal_policy=core.RemovalPolicy.DESTROY
        )

        processed_logs_bucket = s3.Bucket(
            self, "-processed-logs-", removal_policy=core.RemovalPolicy.DESTROY
        )

        # TODO: Change destination bucket in the lambda function
        process_logs_lambda = _lambda.Function(
            self,
            "ProcessLogs",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("lambda"),
            handler="process_logs.handler",
            environment={"PROCESSED_LOGS_BUCKET": processed_logs_bucket.node.unique_id},
        )

        # Requires: from aws_cdk import aws_s3_notifications as s3n
        data_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED, s3n.LambdaDestination(process_logs_lambda)
        )

        # TODO: Create the datalake DB in lake formation
        # db_name = id + "-db"
        # Check Upgrading AWS Glue Data Permissions to the AWS Lake Formation Model: https://docs.aws.amazon.com/lake-formation/latest/dg/upgrade-glue-lake-formation.html

        # TODO: Create Glue crawlers
        # One crawler for mysql data:
        # Source type: Data stores
        # Data store: s3
        # Crawl data in specified paths
        # Path: s3://data_bucket_name/<table name>
        # Add as many paths as tables
        # IAM role: LakeFormationWorkflowRole
        # Run only once to create the tables
        # Database: db_name
        # Advanced settings:
        # Grouping behavior for S3 data (optional):
        # Create a single schema for each S3 path
        # When the crawler detects schema changes in the data store, how should AWS Glue handle table updates in the data catalog?
        # Ignore the change and don't update the table in the data catalog
        # Update all new and existing partitions with metadata from the table
        # How should AWS Glue handle deleted objects in the data store?
        # Mark the table as deprecated in the data catalog

        # TODO: Create Glue jobs

        # TODO: Create Athena link to datalake

        # TODO: Create Quicksight basic reports

        core.CfnOutput(self, "{}-unique-id".format(id), value=self.node.unique_id)
Example 5
    def __init__(self, scope: core.Construct, id: str, vpc, ecs_cluster, role, target_url: str, number_of_tasks=1, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
    
        name = id

        task_def = ecs.Ec2TaskDefinition(self, name,
            network_mode=ecs.NetworkMode.AWS_VPC
        )
        
        if role == "slave":
            container_env={"TARGET_URL": target_url,
                "LOCUST_MODE": role,
                #Need to update to pull the name from Cloudmap
                "LOCUST_MASTER_HOST": "master.loadgen"
            }
        else:
            container_env={"TARGET_URL": target_url,
                "LOCUST_MODE": role 
            }
            
        locust_container = task_def.add_container(
            name + "container",
            # Build the Locust image from the Dockerfile in ./locust
            # (rather than pulling the stock image from Docker Hub)
            image=ecs.ContainerImage.from_asset("locust"),
            memory_reservation_mib=512,
            essential=True,
            logging=ecs.LogDrivers.aws_logs(stream_prefix=name),
            environment=container_env
        )

        web_port_mapping = ecs.PortMapping(container_port=8089)
        if role != "standalone":
            slave1_port_mapping = ecs.PortMapping(container_port=5557)
            slave2_port_mapping = ecs.PortMapping(container_port=5558)
            locust_container.add_port_mappings(web_port_mapping, slave1_port_mapping, slave2_port_mapping)
        else:
            locust_container.add_port_mappings(web_port_mapping)

        security_group = ec2.SecurityGroup(
            self, "Locust",
            vpc=vpc,
            allow_all_outbound=True
        )
        
        security_group.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.tcp(8089)
        )
        
        if role != "standalone":
            security_group.add_ingress_rule(
                ec2.Peer.any_ipv4(),
                ec2.Port.tcp(5557)
            )
            security_group.add_ingress_rule(
                ec2.Peer.any_ipv4(),
                ec2.Port.tcp(5558)
            )
        
        # Create the ecs service
        locust_service = ecs.Ec2Service(
            self, name + "service",
            cluster=ecs_cluster,
            task_definition=task_def,
            security_group=security_group,
            desired_count=number_of_tasks
        )
        
        locust_service.enable_cloud_map(name=role)
        
        # Create the ALB to present the Locust UI 
        if role != "slave":
            self.lb = elbv2.ApplicationLoadBalancer(self, "LocustLB", vpc=vpc, internet_facing=True)
            listener = self.lb.add_listener("Listener", port=80)
            listener.add_targets("ECS1",
                port=80,
                targets=[locust_service]
            )
            core.CfnOutput(
                self, "lburl",
                description="URL for ALB fronting locust master",
                value=self.lb.load_balancer_dns_name
            )
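The construct's class name is not visible in this snippet; assuming it is called LocustService, and that the cluster has a Cloud Map namespace "loadgen" (which the hard-coded "master.loadgen" hostname above implies), a master/worker pair could be wired up from a stack like this (hypothetical names throughout):

        master = LocustService(self, "master", vpc=vpc, ecs_cluster=cluster,
                               role="master", target_url="http://my-app.example.com")
        workers = LocustService(self, "slave", vpc=vpc, ecs_cluster=cluster,
                                role="slave", target_url="http://my-app.example.com",
                                number_of_tasks=4)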
Example 6
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        alb_subnet = ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                             name="ALB",
                                             cidr_mask=24)

        db_subnet = ec2.SubnetConfiguration(
            subnet_type=ec2.SubnetType.ISOLATED, name="DB", cidr_mask=24)

        # VPC
        vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,
            cidr="10.10.0.0/16",
            # configuration will create 2 groups in 2 AZs = 4 subnets.
            subnet_configuration=[alb_subnet, db_subnet],
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=1,
        )
        # Security groups
        # Create Security group that allows traffic into the ALB
        alb_security_group = ec2.SecurityGroup(
            self,
            "ALBSecurityGroup",
            description="Ghost ALB Security Group",
            vpc=vpc)
        alb_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                            ec2.Port.tcp(80),
                                            "allow HTTP to ALB")
        alb_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                            ec2.Port.tcp(443),
                                            "allow HTTPS to ALB")

        # Create Security group for the host/ENI/Fargate that allows 2368
        fargate_security_group = ec2.SecurityGroup(
            self,
            "FargateSecurityGroup",
            description="Ghost ECS Fargate Security Group",
            vpc=vpc)
        fargate_security_group.add_ingress_rule(
            alb_security_group, ec2.Port.tcp(2368),
            "allow ghost default 2368 to fargate")

        # Create the DB's security group, which only allows access from members of the Ghost Fargate SG
        db_security_group = ec2.SecurityGroup(
            self,
            "DBSecurityGroup",
            description="Security group for RDS DB Instance for ghost cms",
            vpc=vpc)
        db_security_group.add_ingress_rule(
            fargate_security_group, ec2.Port.tcp(3306),
            "allow ghost fargate host to connect to db")

        ghost_alb = elb.ApplicationLoadBalancer(
            self,
            "GhostALB",
            internet_facing=True,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=alb_security_group,
            vpc=vpc)

        ghost_target_health_check = elb.HealthCheck(
            interval=core.Duration.seconds(30),
            protocol=elb.Protocol.HTTP,
            timeout=core.Duration.seconds(10),
            healthy_threshold_count=4,
            unhealthy_threshold_count=3,
            healthy_http_codes="200,301")

        ghost_target_group = elb.ApplicationTargetGroup(
            self,
            "GhostTargetGroup",
            port=2368,
            protocol=elb.Protocol.HTTP,
            vpc=vpc,
            health_check=ghost_target_health_check,
            target_type=elb.TargetType.IP)

        ghost_alb_listener = elb.ApplicationListener(
            self,
            "Listener80",
            port=80,
            protocol=elb.Protocol.HTTP,
            load_balancer=ghost_alb,
            default_target_groups=[ghost_target_group])

        core.CfnOutput(self, "vpcid", value=vpc.vpc_id)
        core.CfnOutput(self,
                       "alb_url",
                       description="ALB URL",
                       value=ghost_alb.load_balancer_dns_name)

        self.output_props = props.copy()
        self.output_props['vpc'] = vpc
        self.output_props['subnets'] = vpc.public_subnets
        self.output_props['alb_security_group'] = alb_security_group
        self.output_props['fargate_security_group'] = fargate_security_group
        self.output_props['db_security_group'] = db_security_group
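The output_props dict is the hand-off point to downstream stacks; a sketch of how an app might chain them (NetworkStack and ComputeStack are hypothetical names for this stack and a consumer stack):

app = core.App()
network = NetworkStack(app, "network", props={"namespace": "ghost"})  # hypothetical seed props
# The consumer receives the live vpc and security group objects created above.
compute = ComputeStack(app, "compute", props=network.output_props)
app.synth()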
Example 7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        account_id = core.Aws.ACCOUNT_ID
        lambda_bucket = s3.Bucket(
            self,
            'lambda-bucket',
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name +
            '-lambda-deploy-packages',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.RETAIN)

        ssm.StringParameter(self,
                            'ssm-lambda-bucket',
                            parameter_name='/' + env_name +
                            '/lambda-s3-bucket',
                            string_value=lambda_bucket.bucket_name)

        # To store build artifacts

        artifacts_bucket = s3.Bucket(
            self,
            "build-artifacts",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-build-artifacts',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-build-artifacts-export',
                       value=artifacts_bucket.bucket_name,
                       export_name='build-artifacts-bucket')

        # To store the frontend app

        frontend_bucket = s3.Bucket(
            self,
            "frontend",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-frontend',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-frontend-export',
                       value=frontend_bucket.bucket_name,
                       export_name='frontend-bucket')

        # CloudTrail bucket

        self.cloudtrail_bucket = s3.Bucket(
            self,
            "cloudtrail",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-cloudtrail',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))
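Downstream stacks can resolve the bucket name from the SSM parameter written above; a sketch of a consumer (inside another stack's __init__, assuming the same env_name context value and a hypothetical object key):

        # Resolve the deployment bucket name at synth time from the shared parameter.
        bucket_name = ssm.StringParameter.value_for_string_parameter(
            self, '/' + env_name + '/lambda-s3-bucket')
        deploy_bucket = s3.Bucket.from_bucket_name(self, 'deploy-bucket', bucket_name)
        func_code = _lambda.Code.from_bucket(deploy_bucket, 'my-function.zip')  # hypothetical key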
Example 8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # store
        dynamodb_table = dynamodb.Table(
            self,
            'dynamodb_table',
            table_name=f'{PROJECT}_{STAGE}',
            partition_key=dynamodb.Attribute(
                name='date', type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            point_in_time_recovery=False,
            removal_policy=core.RemovalPolicy.DESTROY,
            server_side_encryption=True,
        )

        # public api
        public_api = appsync.CfnGraphQLApi(
            self,
            'public_api',
            name=f'{PROJECT}_{STAGE}',
            authentication_type='API_KEY',
        )

        now = time.localtime()
        epoch = time.mktime(now)
        public_api_key = appsync.CfnApiKey(
            self,
            'public_api_key',
            api_id=public_api.attr_api_id,
            expires=epoch + core.Duration.days(90).to_seconds(),
        )

        with open('schema.gql', mode='r') as f:
            graphql_schema = f.read()

            appsync.CfnGraphQLSchema(self,
                                     'public_api_schema',
                                     api_id=public_api.attr_api_id,
                                     definition=graphql_schema)

        public_api_role = iam.Role(
            self,
            'public_api_role',
            assumed_by=iam.ServicePrincipal('appsync.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonDynamoDBFullAccess')
            ],
        )

        public_api_datasource = appsync.CfnDataSource(
            self,
            'public_api_datasource',
            api_id=public_api.attr_api_id,
            name=f'{PROJECT}_{STAGE}_dynamodb',
            type='AMAZON_DYNAMODB',
            dynamo_db_config={
                'awsRegion': 'us-east-1',
                'tableName': dynamodb_table.table_name,
            },
            service_role_arn=public_api_role.role_arn,
        )

        with open('mapping_templates/get_holiday.json', mode='r') as f:
            get_holiday_json = f.read()

            appsync.CfnResolver(
                self,
                'public_api_resolver_get_holiday',
                api_id=public_api.attr_api_id,
                type_name='Query',
                field_name='getHoliday',
                data_source_name=public_api_datasource.attr_name,
                kind='UNIT',
                request_mapping_template=get_holiday_json,
                response_mapping_template='$util.toJson($context.result)',
            )

        with open('mapping_templates/list_holidays.json', mode='r') as f:
            list_holidays_json = f.read()

            appsync.CfnResolver(
                self,
                'public_api_resolver_list_holidays',
                api_id=public_api.attr_api_id,
                type_name='Query',
                field_name='listHolidays',
                data_source_name=public_api_datasource.attr_name,
                kind='UNIT',
                request_mapping_template=list_holidays_json,
                response_mapping_template='$util.toJson($context.result)',
            )

        # lambda source code upload to s3
        lambda_assets = s3_assets.Asset(self,
                                        'lambda_assets',
                                        path='./function/.artifact/')

        # update function
        func_api = lambda_.Function(
            self,
            f'{PROJECT}-{STAGE}-func',
            function_name=f'{PROJECT}-{STAGE}-func',
            code=lambda_.Code.from_bucket(bucket=lambda_assets.bucket,
                                          key=lambda_assets.s3_object_key),
            handler='app.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(120),
            log_retention=logs.RetentionDays.SIX_MONTHS,
            memory_size=128,
            tracing=lambda_.Tracing.ACTIVE,
        )
        func_api.add_environment('TABLE_NAME', dynamodb_table.table_name)
        func_api.add_environment('CSV_URL', CSV_URL)
        func_api.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    'dynamodb:Get*',
                    'dynamodb:Put*',
                    'dynamodb:Batch*',
                ],
                resources=[dynamodb_table.table_arn],
            ))

        # schedule execute
        events.Rule(
            self,
            f'{PROJECT}-{STAGE}-schedule',
            enabled=True,
            schedule=events.Schedule.rate(core.Duration.days(10)),
            targets=[events_targets.LambdaFunction(func_api)],
        )

        # lambda@edge
        func_lambdaedge = lambda_.Function(
            self,
            f'{PROJECT}-{STAGE}-func-lambdaedge',
            function_name=f'{PROJECT}-{STAGE}-func-lambdaedge',
            code=lambda_.Code.from_inline(
                open('./function/src/lambdaedge.py').read().replace(
                    '__X_API_KEY__', public_api_key.attr_api_key)),
            handler='index.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(30),
            memory_size=128,
            role=iam.Role(
                self,
                f'{PROJECT}-{STAGE}-func-lambdaedge-role',
                assumed_by=iam.CompositePrincipal(
                    iam.ServicePrincipal('edgelambda.amazonaws.com'),
                    iam.ServicePrincipal('lambda.amazonaws.com'),
                ),
                managed_policies=[
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        'service-role/AWSLambdaBasicExecutionRole'),
                ],
            ),
        )
        lambdaedge_version = func_lambdaedge.add_version(
            hashlib.sha256(
                open('./function/src/lambdaedge.py').read().replace(
                    '__X_API_KEY__',
                    public_api_key.attr_api_key).encode()).hexdigest())

        # ACM
        certificates = acm.Certificate(
            self,
            'certificates',
            domain_name=DOMAIN,
            validation_method=acm.ValidationMethod.DNS,
        )

        # CDN
        cdn = cloudfront.CloudFrontWebDistribution(
            self,
            f'{PROJECT}-{STAGE}-cloudfront',
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[
                        # default behavior
                        cloudfront.Behavior(
                            allowed_methods=cloudfront.
                            CloudFrontAllowedMethods.ALL,
                            default_ttl=core.Duration.seconds(0),
                            max_ttl=core.Duration.seconds(0),
                            min_ttl=core.Duration.seconds(0),
                            is_default_behavior=True,
                            lambda_function_associations=[
                                cloudfront.LambdaFunctionAssociation(
                                    event_type=cloudfront.LambdaEdgeEventType.
                                    ORIGIN_REQUEST,
                                    lambda_function=lambdaedge_version,
                                ),
                            ])
                    ],
                    custom_origin_source=cloudfront.CustomOriginConfig(
                        domain_name=core.Fn.select(
                            2, core.Fn.split('/',
                                             public_api.attr_graph_ql_url)), ),
                )
            ],
            alias_configuration=cloudfront.AliasConfiguration(
                acm_cert_ref=certificates.certificate_arn,
                names=[DOMAIN],
                security_policy=cloudfront.SecurityPolicyProtocol.
                TLS_V1_2_2018,
            ),
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        )
        core.CfnOutput(
            self,
            'cloudfront-domain',
            value=cdn.domain_name,
        )
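The mapping templates are read from files not shown here; assuming getHoliday takes the table's partition key (date) as an argument, mapping_templates/get_holiday.json could be a standard AppSync DynamoDB GetItem template like:

{
    "version": "2017-02-28",
    "operation": "GetItem",
    "key": {
        "date": $util.dynamodb.toDynamoDBJson($ctx.args.date)
    }
}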
Example 9
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 inst_sg: ec2.SecurityGroup, role: iam.IRole, keyname: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cfnhup_restart_handle = ec2.InitServiceRestartHandle()
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_autoscaling/AutoScalingGroup.html#aws_cdk.aws_autoscaling.AutoScalingGroup.apply_cloud_formation_init
        # Note that configuring init metadata also implies that cfn-init and cfn-signal
        # are implicitly added to UserData, and that when the signals property is configured,
        # the creation policy is updated to wait for cfn-init to finish (no need to specify it in user data).
        asg = autoscaling.AutoScalingGroup(
            self,
            "PublicInstanceASG",
            role=role,
            vpc=vpc,
            instance_type=ec2.InstanceType(
                instance_type_identifier=instance_type),
            machine_image=ec2.AmazonLinuxImage(
                edition=ec2.AmazonLinuxEdition.STANDARD,
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
                virtualization=ec2.AmazonLinuxVirt.HVM,
                storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE),
            user_data=ec2.UserData.custom(core.Fn.sub(user_data_public)),
            key_name=keyname,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            desired_capacity=2,
            max_capacity=2,
            min_capacity=2,
            signals=autoscaling.Signals.wait_for_all(
                timeout=core.Duration.minutes(5)),
            security_group=inst_sg)
        asg_logical_id = str(asg.node.default_child.logical_id)
        #print("logical id:"+str(asg.node.default_child.logical_id))
        #print("logical id:"+asg.auto_scaling_group_name)
        # The shell environment variable $ASGLOGICALID was set correctly in
        # UserData and persisted until the config set was executed with
        # ec2.InitCommand. However, accessing the logical resource ID of the
        # AutoScalingGroup from ec2.InitFile is hard, because ec2.InitFile
        # does not replace shell environment variables with actual values.
        # The following were tried, to no avail:
        #    1. When referenced as ${ASGLOGICALID} or as "${ASGLOGICALID}",
        #       it was interpreted as a logical resource name and caused a
        #       ValidationError.
        #    2. When referenced as $ASGLOGICALID, it was passed through as a
        #       literal string instead of being expanded.
        # As a result, the initialization of the ASG had to be split into
        # three steps:
        #    1. initialize the ASG without init or init_options
        #    2. store the logical ID of the ASG in the Python variable
        #       asg_logical_id
        #    3. add init and init_options to the ASG, using the value of
        #       asg_logical_id
        asg.apply_cloud_formation_init(ec2.CloudFormationInit.from_config_sets(
            config_sets={
                "config_set_1": ["config_step1", "config_step2"],
                "config_set_2":
                ["config_step3", "config_step4", "config_step5"]
            },
            configs={
                "config_step1":
                ec2.InitConfig([ec2.InitPackage.yum("git")]),
                "config_step2":
                ec2.InitConfig([
                    ec2.InitCommand.shell_command(
                        "echo configset thinks ASGLOGICALID=$ASGLOGICALID")
                ]),
                "config_step3":
                ec2.InitConfig([
                    ec2.InitFile.from_string(
                        file_name="/etc/cfn/hooks.d/cfn-auto-reloader.conf",
                        content=core.Fn.sub('\n'.join((
                            "[cfn-auto-reloader-hook]", "triggers=post.update",
                            "path=Resources." + asg_logical_id +
                            ".Metadata.AWS::CloudFormation::Init",
                            "action=/opt/aws/bin/cfn-init -v --stack ${AWS::StackName} --resource "
                            + asg_logical_id +
                            " --configsets config_set_1,config_set_2"))),
                        group='root',
                        owner='root',
                        mode='000644',
                        service_restart_handles=[cfnhup_restart_handle])
                ]),
                "config_step4":
                ec2.InitConfig([
                    ec2.InitFile.from_string(
                        file_name="/etc/cfn/cfn-hup.conf",
                        content=core.Fn.sub('\n'.join(
                            ("[main]", "stack=${AWS::StackId}",
                             "region=${AWS::Region}", "verbose=true",
                             "interval=5"))),
                        owner='root',
                        group='root',
                        mode='000644',
                        service_restart_handles=[cfnhup_restart_handle])
                ]),
                "config_step5":
                ec2.InitConfig([
                    ec2.InitService.enable(
                        service_name='cfn-hup',
                        enabled=True,
                        ensure_running=True,
                        service_restart_handle=cfnhup_restart_handle)
                ])
            }),
            config_sets=["config_set_1", "config_set_2"],
            print_log=True)
        core.CfnOutput(self,
                       "Output",
                       value='logical resource id of asg: ' + asg_logical_id)
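        # For comparison: when the init metadata does not need to reference
        # the ASG's own logical ID, the three-step workaround above is
        # unnecessary and the config can be attached in a single step through
        # the init constructor property. A minimal sketch (construct name and
        # instance type are illustrative):
        #
        # simple_asg = autoscaling.AutoScalingGroup(
        #     self,
        #     "SimpleASG",
        #     vpc=vpc,
        #     role=role,
        #     instance_type=ec2.InstanceType("t3.micro"),
        #     machine_image=ec2.AmazonLinuxImage(
        #         generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
        #     init=ec2.CloudFormationInit.from_elements(
        #         ec2.InitPackage.yum("git")),
        #     signals=autoscaling.Signals.wait_for_all(
        #         timeout=core.Duration.minutes(5)))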
Example no. 10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### S3 ###

        source_csv_bucket = _s3.Bucket(self,
                                       "BYODValidationSourceBucket",
                                       versioned=True)

        target_csv_bucket = _s3.Bucket(
            self,
            "BYODValidationTargetBucket",
            removal_policy=core.RemovalPolicy.RETAIN)

        ### Cognito ###

        userpool = _cognito.UserPool(self,
                                     "WebToolUserPool",
                                     user_pool_name="byod-webtool-userpool",
                                     self_sign_up_enabled=True,
                                     auto_verify={
                                         "email": True,
                                         "phone": False
                                     },
                                     user_verification={
                                         "email_subject":
                                         "Your verification code",
                                         "email_body":
                                         "Your verification code is {####}",
                                         "email_style":
                                         _cognito.VerificationEmailStyle.CODE
                                     },
                                     standard_attributes={
                                         "email": {
                                             "required": True,
                                             "mutable": False
                                         }
                                     },
                                     password_policy={})
        client = userpool.add_client("webtool-app-client",
                                     auth_flows={
                                         "custom": True,
                                         "user_password": True,
                                         "user_srp": True,
                                         "refresh_token": True
                                     })
        identity_pool = _cognito.CfnIdentityPool(
            self,
            "WebToolCognitoIdentityPool",
            allow_unauthenticated_identities=True)
        identity_pool.add_property_override(
            "CognitoIdentityProviders",
            [{
                "ClientId": client.user_pool_client_id,
                "ProviderName": userpool.user_pool_provider_name
            }])
        auth_role = _iam.Role(
            self,
            "CognitoAuthRole",
            # Cognito identity pools require the web-identity assume action
            assumed_by=_iam.FederatedPrincipal(
                "cognito-identity.amazonaws.com", {
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "authenticated"
                    }
                }, "sts:AssumeRoleWithWebIdentity"))
        auth_role.add_to_policy(
            PolicyStatement(effect=Effect.ALLOW,
                            actions=["s3:GetObject"],
                            resources=["%s/*" % target_csv_bucket.bucket_arn]))
        unauth_role = _iam.Role(
            self,
            "CognitoUnauthRole",
            assumed_by=_iam.FederatedPrincipal(
                "cognito-identity.amazonaws.com",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                },
                assume_role_action="sts:AssumeRoleWithWebIdentity"))
        identity_pool_policy = _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "WebToolCognitoIdentityPoolPolicy",
            identity_pool_id=identity_pool.ref,
            roles={
                'unauthenticated': unauth_role.role_arn,
                'authenticated': auth_role.role_arn
            })
        core.CfnOutput(self, "UserPoolId", value=userpool.user_pool_id)
        core.CfnOutput(self, "ClientId", value=client.user_pool_client_id)
        core.CfnOutput(self,
                       "ProviderName",
                       value=userpool.user_pool_provider_name)
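        # A hypothetical client-side sketch (boto3) of how a signed-in user
        # would exchange a User Pool ID token for temporary credentials from
        # the identity pool above; the region, pool IDs and token are
        # placeholders:
        #
        # import boto3
        # idp = boto3.client("cognito-identity", region_name="<REGION>")
        # login = "cognito-idp.<REGION>.amazonaws.com/<USER_POOL_ID>"
        # identity = idp.get_id(IdentityPoolId="<IDENTITY_POOL_ID>",
        #                       Logins={login: "<ID_TOKEN>"})
        # creds = idp.get_credentials_for_identity(
        #     IdentityId=identity["IdentityId"],
        #     Logins={login: "<ID_TOKEN>"})["Credentials"]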

        ### DynamoDB ###

        validation_job_table = _dynamodb.Table(
            self,
            "ValidationJobTable",
            partition_key=_dynamodb.Attribute(
                name="id", type=_dynamodb.AttributeType.STRING))

        ### AppSync ###

        api = _appsync.GraphqlApi(
            self,
            "Api",
            name="validation-job-api",
            schema=_appsync.Schema.from_asset(
                os.path.join(dirname, "api", "schema.graphql")),
            authorization_config=AuthorizationConfig(
                default_authorization=AuthorizationMode(
                    authorization_type=AuthorizationType.USER_POOL,
                    user_pool_config=UserPoolConfig(user_pool=userpool))),
            log_config=LogConfig(exclude_verbose_content=False,
                                 field_log_level=FieldLogLevel.ALL))
        api_ds = api.add_dynamo_db_data_source("ValidationJobDataSource",
                                               validation_job_table)
        core.CfnOutput(self, "GraphQLEndpoint", value=api.graphql_url)

        ### SQS ###

        validation_job_queue = _sqs.Queue(self, "ValidationJobQueue")

        profiling_job_queue = _sqs.Queue(self, "ProfilingJobQueue")

        ### Lambda ###

        validation_trigger_function = _lambda.Function(
            self,
            "ValidationTriggerFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(dirname, "lambda", "validation_trigger")),
            handler='lambda_function.lambda_handler')

        validation_trigger_function.add_environment(
            "TABLE_NAME", validation_job_table.table_name)
        validation_trigger_function.add_environment(
            "QUEUE_URL", validation_job_queue.queue_url)

        validation_trigger_function.add_event_source(
            _S3EventSource(source_csv_bucket,
                           events=[_s3.EventType.OBJECT_CREATED]))

        source_csv_bucket.grant_read(validation_trigger_function)
        validation_job_table.grant_read_write_data(validation_trigger_function)
        validation_job_queue.grant_send_messages(validation_trigger_function)
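        # A hypothetical sketch of the validation_trigger handler (the asset
        # code is not shown): it would record a job row in DynamoDB and
        # enqueue the S3 object key for the Fargate workers. All names below
        # are illustrative.
        #
        # import json, os, uuid
        # import boto3
        #
        # table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        # sqs = boto3.client("sqs")
        #
        # def lambda_handler(event, context):
        #     for record in event["Records"]:
        #         key = record["s3"]["object"]["key"]
        #         job = {"id": str(uuid.uuid4()), "key": key}
        #         table.put_item(Item={**job, "status": "QUEUED"})
        #         sqs.send_message(QueueUrl=os.environ["QUEUE_URL"],
        #                          MessageBody=json.dumps(job))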

        stager_function = _lambda.Function(self,
                                           "StagerFunction",
                                           runtime=_lambda.Runtime.NODEJS_12_X,
                                           code=_lambda.Code.from_asset(
                                               os.path.join(
                                                   dirname, "lambda",
                                                   "stager")),
                                           handler='index.handler')

        stager_function.add_environment("REGION", self.region)
        stager_function.add_environment("SOURCE_BUCKET",
                                        source_csv_bucket.bucket_name)
        stager_function.add_environment("STAGE_BUCKET",
                                        target_csv_bucket.bucket_name)
        source_csv_bucket.grant_read(stager_function)
        target_csv_bucket.grant_put(stager_function)

        ### ECS Fargate ###

        validation_fargate_asset = _ecr_assets.DockerImageAsset(
            self,
            "ValidationBuildImage",
            directory=os.path.join(dirname, "fargate", "validation"))
        profiling_fargate_asset = _ecr_assets.DockerImageAsset(
            self,
            "ProfilingBuildImage",
            directory=os.path.join(dirname, "fargate", "profiling"))

        vpc = _ec2.Vpc(self, "VPC", max_azs=3)
        cluster = _ecs.Cluster(self, "ECSCluster", vpc=vpc)

        validation_fargate_service = _ecs_patterns.QueueProcessingFargateService(
            self,
            "ValidationFargateService",
            cluster=cluster,
            cpu=4096,
            memory_limit_mib=30720,
            enable_logging=True,
            image=_ecs.ContainerImage.from_docker_image_asset(
                validation_fargate_asset),
            environment={
                "TABLE_NAME": validation_job_table.table_name,
                "QUEUE_URL": validation_job_queue.queue_url,
                "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
                "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
                "REGION": self.region
            },
            queue=validation_job_queue,
            max_scaling_capacity=2,
            max_healthy_percent=200,
            min_healthy_percent=66)
        validation_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))
        validation_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))

        profiling_fargate_service = _ecs_patterns.QueueProcessingFargateService(
            self,
            "ProfilingFargateService",
            cluster=cluster,
            cpu=4096,
            memory_limit_mib=30720,
            enable_logging=True,
            image=_ecs.ContainerImage.from_docker_image_asset(
                profiling_fargate_asset),
            environment={
                "TABLE_NAME": validation_job_table.table_name,
                "QUEUE_URL": profiling_job_queue.queue_url,
                "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
                "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
                "REGION": self.region
            },
            queue=profiling_job_queue,
            max_scaling_capacity=2,
            max_healthy_percent=200,
            min_healthy_percent=66)
        profiling_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))
        profiling_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
Example no. 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        work_dir = pathlib.Path(__file__).parents[1]

        # The steps below allow reusing an ECS cluster that was already created by the shared stack

        # Get cluster name from ssm parameter
        cluster_name = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetClusterName",
            string_parameter_name="/dev/compute/container/ecs-cluster-name"
        ).string_value

        vpc_az = ssm.StringListParameter.from_string_list_parameter_name(
            self,
            "GetVpcAz",
            string_list_parameter_name="/dev/network/vpc/vpc-az"
        ).string_list_value

        # Using String parameters instead of StringList because of a subnet-parsing issue
        vpc_public_subnets_1 = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetVpcPublicSubnets1",
            string_parameter_name="/dev/network/vpc/vpc-public-subnets-1"
        ).string_value

        vpc_public_subnets_2 = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetVpcPublicSubnets2",
            string_parameter_name="/dev/network/vpc/vpc-public-subnets-2"
        ).string_value

        vpc_id = ssm.StringParameter.from_string_parameter_name(
            self, "GetVpcId",
            string_parameter_name="/dev/network/vpc/vpc-id").string_value

        ec2_vpc = ec2.Vpc.from_vpc_attributes(
            self,
            "GetVpc",
            availability_zones=vpc_az,
            vpc_id=vpc_id,
            public_subnet_ids=[vpc_public_subnets_1, vpc_public_subnets_2])

        # Get security group id from ssm parameter
        security_group_id = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetSgId",
            string_parameter_name="/dev/network/vpc/security-group-id"
        ).string_value

        # Get security group from lookup
        ec2_sgp = ec2.SecurityGroup.from_security_group_id(
            self, "GetSgp", security_group_id=security_group_id)

        # Pass vpc, sgp and ecs cluster name to get ecs cluster info
        ecs_cluster = ecs.Cluster.from_cluster_attributes(
            self,
            "GetEcsCluster",
            cluster_name=cluster_name,
            vpc=ec2_vpc,
            security_groups=[ec2_sgp])
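        # For context, a minimal sketch of how the shared stack might publish
        # these parameters (the parameter names match the lookups above;
        # construct IDs are illustrative):
        #
        # ssm.StringParameter(
        #     self, "ClusterNameParam",
        #     parameter_name="/dev/compute/container/ecs-cluster-name",
        #     string_value=cluster.cluster_name)
        # ssm.StringListParameter(
        #     self, "VpcAzParam",
        #     parameter_name="/dev/network/vpc/vpc-az",
        #     string_list_value=vpc.availability_zones)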

        # Fargate Service
        task_definition = ecs.FargateTaskDefinition(self,
                                                    "TaskDef",
                                                    memory_limit_mib=512,
                                                    cpu=256)

        container = task_definition.add_container(
            "web",
            image=ecs.ContainerImage.from_asset(
                os.path.join(work_dir, "container")),
            environment=dict(name="latest"))

        port_mapping = ecs.PortMapping(container_port=8000,
                                       protocol=ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # Create Fargate Service
        service = ecs.FargateService(
            self,
            "Service",
            cluster=ecs_cluster,
            task_definition=task_definition,
            assign_public_ip=True,
            deployment_controller=ecs.DeploymentController(
                type=ecs.DeploymentControllerType.CODE_DEPLOY))

        task_definition_rev = ecs.FargateTaskDefinition(
            self,
            "TaskDefinitionNew",
            cpu=256,
            memory_limit_mib=512,
            family=task_definition.family)

        cfn_task_definition = task_definition_rev.node.default_child
        cfn_task_definition.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN
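        # UpdateReplacePolicy RETAIN keeps a replaced task definition revision
        # instead of deleting it, presumably so CodeDeploy can still roll back
        # to it during a blue/green deployment.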

        container_rev = task_definition_rev.add_container(
            "docker",
            image=ecs.ContainerImage.from_asset(
                os.path.join(work_dir, "container_temp")),
            environment=dict(name="docker-new"))

        container_rev.add_port_mappings(port_mapping)

        # Create Application LoadBalancer
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=ec2_vpc,
                                           internet_facing=True)

        # Add test listener to the LB
        test_listener = lb.add_listener("TestListener", port=8080, open=True)

        # Route test traffic to the service
        test_listener.add_targets("TestFargate", port=8080, targets=[service])

        # Add prod listener to the LB
        prod_listener = lb.add_listener("ProdListener", port=80, open=True)

        # Route production traffic to the service
        prod_listener.add_targets("ProdFargate", port=80, targets=[service])

        # add an output with a well-known name to read it from the integ tests
        url_output = core.CfnOutput(
            self, "UrlOutput", value=f"http://{lb.load_balancer_dns_name}")
Example no. 12
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        """
        First step: Create MediaPackage Channel
        """
        channel = mediapackage.CfnChannel(
            scope=self,
            id_="media-package-channel-{0}".format(
                DEFAULT_CONF.get("id_channel")),
            id=DEFAULT_CONF.get("id_channel"),
            description="Channel {0}".format(DEFAULT_CONF.get("id_channel")))
        """
        Second step: Add a HLS endpoint to MediaPackage Channel and output the URL of this endpoint
        """
        hls_endpoint_package = mediapackage.CfnOriginEndpoint.HlsPackageProperty(
            segment_duration_seconds=DEFAULT_CONF.get(
                "hls_segment_duration_seconds"),
            playlist_window_seconds=DEFAULT_CONF.get(
                "hls_playlist_window_seconds"),
            stream_selection=mediapackage.CfnOriginEndpoint.
            StreamSelectionProperty(
                max_video_bits_per_second=DEFAULT_CONF.get(
                    "hls_max_video_bits_per_second"),
                min_video_bits_per_second=DEFAULT_CONF.get(
                    "hls_min_video_bits_per_second"),
                stream_order=DEFAULT_CONF.get("hls_stream_order")))

        hls_endpoint = mediapackage.CfnOriginEndpoint(
            scope=self,
            id_="endpoint-{0}".format(DEFAULT_CONF.get("id_channel")),
            id=DEFAULT_CONF.get("id_channel"),
            channel_id=DEFAULT_CONF.get("id_channel"),
            description="Endpoint - {0}".format(
                DEFAULT_CONF.get("id_channel")),
            hls_package=hls_endpoint_package)

        # Output the url stream to player
        core.CfnOutput(scope=self,
                       id="media-package-url-stream",
                       value=hls_endpoint.attr_url)
        """
        Third step: Create MediaLive SG, MediaLive Input and MediaLive Channel
        """
        """ 
        Input Security Group
        Allow 0.0.0.0/0 - Modify it if you want """
        security_groups_input = medialive.CfnInputSecurityGroup(
            scope=self,
            id="media-live-sg-input",
            whitelist_rules=[{
                "cidr": DEFAULT_CONF.get("ip_sg_input")
            }])
        """ Input destination """
        media_live_input_destination = medialive.CfnInput.InputDestinationRequestProperty(
            stream_name=DEFAULT_CONF.get("stream_name"))
        """ Input with destinations output """
        medialive_input = medialive.CfnInput(
            scope=self,
            id="media-input-channel",
            name="input-test1",
            type="RTMP_PUSH",
            input_security_groups=[security_groups_input.ref],
            destinations=[media_live_input_destination])
        """ Media Live Channel Block """
        media_live_channel_input_spec = medialive.CfnChannel.InputSpecificationProperty(
            codec="AVC", maximum_bitrate="MAX_20_MBPS", resolution="HD")

        media_live_channel_input_attach = medialive.CfnChannel.InputAttachmentProperty(
            input_id=medialive_input.ref,
            input_attachment_name="attach-input-test1")

        media_live_channel_destination_settings = medialive.CfnChannel.MediaPackageOutputDestinationSettingsProperty(
            channel_id=DEFAULT_CONF.get("id_channel"))
        media_live_channel_destination = medialive.CfnChannel.OutputDestinationProperty(
            id="media-destination",
            media_package_settings=[media_live_channel_destination_settings])
        """
        We need to control bitrate based on user connection. The manifest will include 2 video quality:
        video_720p30 = bitrate 3000000
        video_1080p30 = bitrate 5000000
        Let's go create them
        You can create more resolutions based on bitrate, like:
        video_240p30 = bitrate 750000
        video_480p30 = bitrate 1500000
        """
        # Audio + output + video
        """ Audio Section """
        audio_aac = medialive.CfnChannel.AacSettingsProperty(
            bitrate=192000,
            coding_mode="CODING_MODE_2_0",
            input_type="NORMAL",
            profile="LC",
            rate_control_mode="CBR",
            raw_format="NONE",
            sample_rate=48000,
            spec="MPEG4")
        audio_codec = medialive.CfnChannel.AudioCodecSettingsProperty(
            aac_settings=audio_aac)
        audio1 = medialive.CfnChannel.AudioDescriptionProperty(
            audio_selector_name="Default",
            audio_type_control="FOLLOW_INPUT",
            language_code_control="FOLLOW_INPUT",
            name="audio_1",
            codec_settings=audio_codec)
        audio2 = medialive.CfnChannel.AudioDescriptionProperty(
            audio_selector_name="Default",
            audio_type_control="FOLLOW_INPUT",
            language_code_control="FOLLOW_INPUT",
            name="audio_2",
            codec_settings=audio_codec)
        """ Output Section """
        output_mediapackage_destination_ref = medialive.CfnChannel.OutputLocationRefProperty(
            destination_ref_id="media-destination")
        output_mediapackage_destination = medialive.CfnChannel.MediaPackageGroupSettingsProperty(
            destination=output_mediapackage_destination_ref)
        output_settings = medialive.CfnChannel.OutputGroupSettingsProperty(
            media_package_group_settings=output_mediapackage_destination)

        output_output1 = medialive.CfnChannel.OutputProperty(
            audio_description_names=["audio_1"],
            output_name="1080p30",
            video_description_name="video_1080p30",
            output_settings=medialive.CfnChannel.OutputSettingsProperty(
                media_package_output_settings={}))

        output_output2 = medialive.CfnChannel.OutputProperty(
            audio_description_names=["audio_2"],
            output_name="720p30",
            video_description_name="video_720p30",
            output_settings=medialive.CfnChannel.OutputSettingsProperty(
                media_package_output_settings={}))

        output = medialive.CfnChannel.OutputGroupProperty(
            name="HD",
            output_group_settings=output_settings,
            outputs=[output_output1, output_output2])
        """ Video Section """
        video1_h264 = medialive.CfnChannel.H264SettingsProperty(
            adaptive_quantization="HIGH",
            afd_signaling="NONE",
            bitrate=5000000,
            color_metadata="INSERT",
            entropy_encoding="CABAC",
            flicker_aq="ENABLED",
            framerate_control="SPECIFIED",
            framerate_denominator=1,
            framerate_numerator=30,
            gop_b_reference="ENABLED",
            gop_closed_cadence=1,
            gop_num_b_frames=3,
            gop_size=60,
            gop_size_units="FRAMES",
            level="H264_LEVEL_AUTO",
            look_ahead_rate_control="HIGH",
            num_ref_frames=3,
            par_control="SPECIFIED",
            profile="HIGH",
            rate_control_mode="CBR",
            scan_type="PROGRESSIVE",
            scene_change_detect="ENABLED",
            slices=1,
            spatial_aq="ENABLED",
            syntax="DEFAULT",
            temporal_aq="ENABLED",
            timecode_insertion="DISABLED")
        video1_codec = medialive.CfnChannel.VideoCodecSettingsProperty(
            h264_settings=video1_h264)
        video1_description = medialive.CfnChannel.VideoDescriptionProperty(
            codec_settings=video1_codec,
            height=1080,
            name="video_1080p30",
            respond_to_afd="NONE",
            scaling_behavior="DEFAULT",
            sharpness=50,
            width=1920)

        video2_h264 = medialive.CfnChannel.H264SettingsProperty(
            adaptive_quantization="HIGH",
            afd_signaling="NONE",
            bitrate=3000000,
            color_metadata="INSERT",
            entropy_encoding="CABAC",
            flicker_aq="ENABLED",
            framerate_control="SPECIFIED",
            framerate_denominator=1,
            framerate_numerator=30,
            gop_b_reference="ENABLED",
            gop_closed_cadence=1,
            gop_num_b_frames=3,
            gop_size=60,
            gop_size_units="FRAMES",
            level="H264_LEVEL_AUTO",
            look_ahead_rate_control="HIGH",
            num_ref_frames=3,
            par_control="SPECIFIED",
            profile="HIGH",
            rate_control_mode="CBR",
            scan_type="PROGRESSIVE",
            scene_change_detect="ENABLED",
            slices=1,
            spatial_aq="ENABLED",
            syntax="DEFAULT",
            temporal_aq="ENABLED",
            timecode_insertion="DISABLED")
        video2_codec = medialive.CfnChannel.VideoCodecSettingsProperty(
            h264_settings=video2_h264)
        video2_description = medialive.CfnChannel.VideoDescriptionProperty(
            codec_settings=video2_codec,
            height=720,
            name="video_720p30",
            respond_to_afd="NONE",
            scaling_behavior="DEFAULT",
            sharpness=100,
            width=1280)
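        # The two H264 blocks above differ only in bitrate, sharpness and
        # frame size, so the extra ladder steps mentioned earlier
        # (video_480p30, video_240p30) could come from a small helper; a
        # sketch, with the unchanged settings elided:
        #
        # def video_description(name, width, height, bitrate, sharpness):
        #     h264 = medialive.CfnChannel.H264SettingsProperty(
        #         bitrate=bitrate, ...)  # remaining settings as above
        #     return medialive.CfnChannel.VideoDescriptionProperty(
        #         codec_settings=medialive.CfnChannel.VideoCodecSettingsProperty(
        #             h264_settings=h264),
        #         name=name, width=width, height=height,
        #         respond_to_afd="NONE", scaling_behavior="DEFAULT",
        #         sharpness=sharpness)
        #
        # video_480p30 = video_description("video_480p30", 854, 480,
        #                                  1500000, 100)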
        """ Channel final settings and channel start """
        media_live_channel_timecode = medialive.CfnChannel.TimecodeConfigProperty(
            source="EMBEDDED")
        media_live_channel_encoder = medialive.CfnChannel.EncoderSettingsProperty(
            audio_descriptions=[audio1, audio2],
            video_descriptions=[video1_description, video2_description],
            output_groups=[output],
            timecode_config=media_live_channel_timecode)

        policy_document = iam.PolicyDocument.from_json(INLINE_POLICY)
        medialive_role = iam.Role(
            scope=self,
            id='medialive_role',
            role_name='medialive_role',
            assumed_by=iam.ServicePrincipal('medialive.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AWSElementalMediaLiveFullAccess')
            ],
            inline_policies={"medialivecustom": policy_document})

        media_live_channel = medialive.CfnChannel(
            scope=self,
            id="media-live-channel-{0}".format(DEFAULT_CONF.get("id_channel")),
            channel_class="SINGLE_PIPELINE",
            name=DEFAULT_CONF.get("id_channel"),
            input_specification=media_live_channel_input_spec,
            input_attachments=[media_live_channel_input_attach],
            destinations=[media_live_channel_destination],
            encoder_settings=media_live_channel_encoder,
            role_arn=medialive_role.role_arn)

        # We need to add a dependency because CloudFormation must wait for the
        # channel creation to finish before creating the endpoint
        mediadep = core.ConcreteDependable()
        mediadep.add(channel)
        hls_endpoint.node.add_dependency(mediadep)
        media_live_channel.node.add_dependency(mediadep)
Example no. 13
    def __init__(self, scope: cdk.Construct, construct_id: str, vpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        """ Read UserData Script: """
        try:
            with open("app_db_stack/user_data/deploy_app.sh", mode="r") as file:
                user_data = file.read()
        except OSError:
            print("Unable to read UserData script")
            raise  # fail fast instead of leaving user_data undefined below

        """ AMI: """
        linux_ami = _ec2.AmazonLinuxImage(generation=_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
                                          edition=_ec2.AmazonLinuxEdition.STANDARD,
                                          virtualization=_ec2.AmazonLinuxVirt.HVM,
                                          storage=_ec2.AmazonLinuxStorage.GENERAL_PURPOSE
                                          )

        """ Create ALB: """
        alb = _elbv2.ApplicationLoadBalancer(
            self,
            "MyAlbId",
            vpc=vpc,
            internet_facing=True,
            load_balancer_name="WebServerAlb"
        )

        # Allow alb SG to receive traffic from port 80:
        alb.connections.allow_from_any_ipv4(
            _ec2.Port.tcp(80),
            description="Allow Internet access on ALB port 80"
        )

        # Add listener to ALB:
        listener = alb.add_listener("listenerId",
                                    port=80,
                                    open=True)

        """ Web server IAM role: """
        web_server_role = _iam.Role(self, "WebServerRoleId",
                                    assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"),
                                    managed_policies=[
                                        _iam.ManagedPolicy.from_aws_managed_policy_name(
                                            "AmazonSSMManagedInstanceCore",
                                        ),
                                        _iam.ManagedPolicy.from_aws_managed_policy_name(
                                            "AmazonS3ReadOnlyAccess"
                                        )
                                    ])

        """ Create ASG with 2 EC2 instances: """
        self.web_server_asg = _autoscaling.AutoScalingGroup(self,
                                                            "WebServerAsgId",
                                                            vpc=vpc,
                                                            key_name="A4L",
                                                            vpc_subnets=_ec2.SubnetSelection(
                                                                subnet_type=_ec2.SubnetType.PRIVATE
                                                            ),
                                                            instance_type=_ec2.InstanceType(
                                                                instance_type_identifier="t2.micro",
                                                            ),
                                                            machine_image=linux_ami,
                                                            role=web_server_role,
                                                            min_capacity=1,
                                                            max_capacity=3,
                                                            desired_capacity=3,
                                                            user_data=_ec2.UserData.custom(user_data)
                                                            )
        # Allow ASG SG to receive traffic from ALB SG:
        self.web_server_asg.connections.allow_from(alb, _ec2.Port.tcp(80),
                                                   description="Allow ASG SG to receive traffic from ALB SG")

        # Add ASG Instances to the ALB Target Group:
        listener.add_targets("ListenerId", port=80, targets=[self.web_server_asg])

        """ Output of the ALB Domain Name: """
        output_alb_1 = cdk.CfnOutput(self,
                                     "AlbDomainName",
                                     value=f"http://{alb.load_balancer_dns_name}",
                                     description="Web Server ALB Domain Name")

        """ Tags: """
Example no. 14
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below:
        # Create Kinesis Data Stream
        self.data_pipe_stream = _kinesis.Stream(
            self,
            "dataPipeStream",
            retention_period=core.Duration.hours(24),
            shard_count=1,
            stream_name="data_pipe")

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open(
                    "tumbling_window_stream_analytics/stacks/back_end/lambda_src/stream_data_producer.py",
                    encoding="utf-8",
                    mode="r") as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "streamDataProducerFn",
            function_name=f"data_producer_fn",
            description=
            "Produce streaming data events and push to Kinesis stream",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(60),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "STREAM_NAME": f"{self.data_pipe_stream.stream_name}",
                "APP_ENV": "Production",
                "STREAM_AWS_REGION": f"{core.Aws.REGION}"
            })

        # Grant our Lambda Producer privileges to write to Kinesis Data Stream
        self.data_pipe_stream.grant_read_write(data_producer_fn)
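        # A hypothetical sketch of stream_data_producer.py (read above as
        # inline code): it would push synthetic events to the stream named in
        # STREAM_NAME. The payload fields are illustrative.
        #
        # import datetime, json, os, random
        # import boto3
        #
        # kinesis = boto3.client("kinesis",
        #                        region_name=os.environ["STREAM_AWS_REGION"])
        #
        # def lambda_handler(event, context):
        #     for i in range(10):
        #         payload = {"value": random.random(),
        #                    "ts": datetime.datetime.utcnow().isoformat()}
        #         kinesis.put_record(StreamName=os.environ["STREAM_NAME"],
        #                            Data=json.dumps(payload),
        #                            PartitionKey=str(i))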

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "dataProducerLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "streamDataProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{data_producer_fn.function_name}?tab=code",
            description=
            "Produce streaming data events and push to Kinesis stream.")
Example no. 15
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        

        cluster = ecs.Cluster(
            self, 'EKSGraviton2',
            vpc=vpc,
            container_insights=True
        )
        
        task_definition = ecs.Ec2TaskDefinition(
            self, "TaskDef")
            
        container_uri = ssm.StringParameter.value_for_string_parameter(
            self, "graviton_lab_container_uri")

        ecs_ami = ecs.EcsOptimizedAmi(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            hardware_type=ecs.AmiHardwareType.ARM)

        asg_ecs = cluster.add_capacity(
            "G2AutoScalingGroup",
            instance_type=ec2.InstanceType("m6g.2xlarge"),
            machine_image=ecs_ami)

        container = task_definition.add_container(
            "web",
            image=ecs.ContainerImage.from_registry(container_uri),
            memory_limit_mib=512,
            logging=ecs.LogDrivers.firelens(
                options={
                    "Name": "cloudwatch",
                    "log_key": "log",
                    "region": "us-east-1",
                    "delivery_stream": "my-stream",
                    "log_group_name": "firelens-fluent-bit",
                    "auto_create_group": "true",
                    "log_stream_prefix": "from-fluent-bit"}
            )    
        )
        port_mapping = ecs.PortMapping(
            container_port=3000,
            host_port=8080,
            protocol=ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)
    
        # Create Service
        service = ecs.Ec2Service(
            self, "Service",
            cluster=cluster,
            task_definition=task_definition
        )

        # Create ALB
        lb = elbv2.ApplicationLoadBalancer(
            self, "LB",
            vpc=vpc,
            internet_facing=True
        )
    
        listener = lb.add_listener(
            "PublicListener",
            port=80,
            open=True
        )   

        # Attach ALB to ECS Service
        listener.add_targets(
            "ECS",
            port=80,
            targets=[service]
        )   

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=lb.load_balancer_dns_name
        )
Example no. 16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        blueprint_bucket_name = create_blueprint_bucket_name_parameter(self)
        custom_algorithms_ecr_repo_arn = create_custom_algorithms_ecr_repo_arn_parameter(
            self)
        kms_key_arn = create_kms_key_arn_parameter(self)
        algorithm_image_uri = create_algorithm_image_uri_parameter(self)
        model_name = create_model_name_parameter(self)
        model_artifact_location = create_model_artifact_location_parameter(
            self)
        data_capture_location = create_data_capture_location_parameter(self)
        inference_instance = create_inference_instance_parameter(self)

        # Conditions
        custom_algorithms_ecr_repo_arn_provided = create_custom_algorithms_ecr_repo_arn_provided_condition(
            self, custom_algorithms_ecr_repo_arn)
        kms_key_arn_provided = create_kms_key_arn_provided_condition(
            self, kms_key_arn)

        # Resources #
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(
            self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # provision api gateway and lambda for inference using solution constructs
        inference_api_gateway = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "BYOMInference",
            lambda_function_props={
                "runtime":
                lambda_.Runtime.PYTHON_3_8,
                "handler":
                "main.handler",
                "code":
                lambda_.Code.from_bucket(
                    blueprint_bucket, "blueprints/byom/lambdas/inference.zip"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-inference",
                "proxy": False,
            },
        )
        # add suppressions
        inference_api_gateway.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        provision_resource = inference_api_gateway.api_gateway.root.add_resource(
            "inference")
        provision_resource.add_method("POST")

        # create Sagemaker role
        sagemaker_role = create_sagemaker_role(
            self,
            "MLOpsRealtimeSagemakerRole",
            custom_algorithms_ecr_arn=custom_algorithms_ecr_repo_arn.
            value_as_string,
            kms_key_arn=kms_key_arn.value_as_string,
            assets_bucket_name=assets_bucket_name.value_as_string,
            input_bucket_name=assets_bucket_name.value_as_string,
            input_s3_location=assets_bucket_name.value_as_string,
            output_s3_location=data_capture_location.value_as_string,
            ecr_repo_arn_provided_condition=
            custom_algorithms_ecr_repo_arn_provided,
            kms_key_arn_provided_condition=kms_key_arn_provided,
        )

        # create sagemaker model
        sagemaker_model = create_sagemaker_model(
            self,
            "MLOpsSagemakerModel",
            execution_role=sagemaker_role,
            primary_container={
                "image":
                algorithm_image_uri.value_as_string,
                "modelDataUrl":
                f"s3://{assets_bucket_name.value_as_string}/{model_artifact_location.value_as_string}",
            },
            tags=[{
                "key": "model_name",
                "value": model_name.value_as_string
            }],
        )

        # Create Sagemaker EndpointConfig
        sagemaker_endpoint_config = create_sagemaker_endpoint_config(
            self,
            "MLOpsSagemakerEndpointConfig",
            sagemaker_model.attr_model_name,
            model_name.value_as_string,
            inference_instance.value_as_string,
            data_capture_location.value_as_string,
            core.Fn.condition_if(kms_key_arn_provided.logical_id,
                                 kms_key_arn.value_as_string,
                                 core.Aws.NO_VALUE).to_string(),
        )

        # create a dependency on the model
        sagemaker_endpoint_config.add_depends_on(sagemaker_model)

        # create Sagemaker endpoint
        sagemaker_endpoint = create_sagemaker_endpoint(
            self,
            "MLOpsSagemakerEndpoint",
            sagemaker_endpoint_config.attr_endpoint_config_name,
            model_name.value_as_string,
        )

        # add dependency on endpoint config
        sagemaker_endpoint.add_depends_on(sagemaker_endpoint_config)

        # Create Lambda - sagemakerendpoint
        LambdaToSagemakerEndpoint(
            self,
            "LambdaSagmakerEndpoint",
            existing_sagemaker_endpoint_obj=sagemaker_endpoint,
            existing_lambda_obj=inference_api_gateway.lambda_function,
        )

        # Outputs #
        core.CfnOutput(
            self,
            id="SageMakerModelName",
            value=sagemaker_model.attr_model_name,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointConfigName",
            value=sagemaker_endpoint_config.attr_endpoint_config_name,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointName",
            value=sagemaker_endpoint.attr_endpoint_name,
        )
        core.CfnOutput(
            self,
            id="EndpointDataCaptureLocation",
            value=
            f"https://s3.console.aws.amazon.com/s3/buckets/{data_capture_location.value_as_string}/",
            description=
            "Endpoint data capture location (to be used by Model Monitor)",
        )
Example no. 17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prefix = "test"
        cidr = "192.168.0.0/16"

        # def name(s): return "{0}/{1}".format(prefix, s)
        def name(s):
            return "{0} {1}".format(prefix, s)

        # VPC
        self.vpc = ec2.CfnVPC(self,
                              "vpc",
                              cidr_block=cidr,
                              enable_dns_hostnames=True,
                              enable_dns_support=True,
                              tags=[core.CfnTag(key="Name", value=prefix)])

        # InternetGateway
        igw = ec2.CfnInternetGateway(
            self, "igw", tags=[core.CfnTag(key="Name", value=prefix)])
        igw_attachment = ec2.CfnVPCGatewayAttachment(
            self,
            "igw_attachment",
            vpc_id=self.vpc.ref,
            internet_gateway_id=igw.ref)
        dhcpoptions = ec2.CfnDHCPOptions(
            self,
            "dhcpoptions",
            domain_name="ec2.internal " + prefix,
            domain_name_servers=["AmazonProvidedDNS"],
            tags=[core.CfnTag(key="Name", value=prefix)])
        dhcpoptionsassociation = ec2.CfnVPCDHCPOptionsAssociation(
            self,
            "dhcpoptionsassociation",
            dhcp_options_id=dhcpoptions.ref,
            vpc_id=self.vpc.ref)

        # PrivateSubnetA
        # private_subnet_a = ec2.CfnSubnet(
        #     self, "private_a",
        #     vpc_id=vpc.ref,
        #     cidr_block="192.168.0.0/24",
        #     availability_zone="ap-northeast-1a",
        #     tags=[
        #         core.CfnTag(key="Name", value=name("private_a"))
        #     ]
        # )
        # PrivateSubnetC
        # private_subnet_c = ec2.CfnSubnet(
        #     self, "private_c",
        #     vpc_id=vpc.ref,
        #     cidr_block="192.168.1.0/24",
        #     availability_zone="ap-northeast-1c",
        #     tags=[
        #         core.CfnTag(key="Name", value=name("private_c"))
        #     ]
        # )

        # PublicSubnetA
        self.public_subnet_a = ec2.CfnSubnet(
            self,
            "public_a",
            vpc_id=self.vpc.ref,
            cidr_block="192.168.0.0/20",
            # availability_zone="ap-northeast-1a",
            availability_zone="us-east-1a",
            tags=[core.CfnTag(key="Name", value=prefix + " public_a")])
        # PublicSubnetC
        self.public_subnet_c = ec2.CfnSubnet(
            self,
            "public_c",
            vpc_id=self.vpc.ref,
            cidr_block="192.168.16.0/20",
            availability_zone="us-east-1c",
            tags=[core.CfnTag(key="Name", value=prefix + " public_c")])
        self.public_subnet_d = ec2.CfnSubnet(
            self,
            "public_d",
            vpc_id=self.vpc.ref,
            cidr_block="192.168.32.0/20",
            availability_zone="us-east-1d",
            tags=[core.CfnTag(key="Name", value=prefix + " public_d")])

        # EIP1 (for NATGW)
        # eip1 = ec2.CfnEIP(
        #     self, "eip1",
        #     domain="vpc",
        # )
        # eip1.add_depends_on(igw_attachment)

        # EIP2 (for NATGW)
        # eip2 = ec2.CfnEIP(
        #     self, "eip2",
        #     domain="vpc",
        # )
        # eip2.add_depends_on(igw_attachment)

        # NatGatewayA
        # natgw_a = ec2.CfnNatGateway(
        #     self, "natgw_a",
        #     allocation_id=eip1.attr_allocation_id,
        #     subnet_id=self.public_subnet_a.ref,
        #     tags=[
        #         core.CfnTag(key="Name", value=name("natgw_a"))
        #     ]
        # )
        # NatGatewayC
        # natgw_c = ec2.CfnNatGateway(
        #     self, "natgw_c",
        #     allocation_id=eip2.attr_allocation_id,
        #     subnet_id=public_subnet_c.ref,
        #     tags=[
        #         core.CfnTag(key="Name", value=name("natgw_c"))
        #     ]
        # )

        # RouteTable of PrivateSubnetA
        # rtb_private_a = ec2.CfnRouteTable(
        #     self, "rtb_private_a",
        #     vpc_id=vpc.ref,
        #     tags=[
        #         core.CfnTag(key="Name", value=name("rtb_private_a"))
        #     ]
        # )
        # ec2.CfnSubnetRouteTableAssociation(
        #     self, "rtb_private_a_association",
        #     route_table_id=rtb_private_a.ref,
        #     subnet_id=private_subnet_a.ref
        # )
        # ec2.CfnRoute(
        #     self, "route_private_a",
        #     route_table_id=rtb_private_a.ref,
        #     destination_cidr_block="0.0.0.0/0",
        #     nat_gateway_id=natgw_a.ref
        # )

        # RouteTable of PrivateSubnetC
        # rtb_private_c = ec2.CfnRouteTable(
        #     self, "rtb_private_c",
        #     vpc_id=vpc.ref,
        #     tags=[
        #         core.CfnTag(key="Name", value=name("rtb_private_c"))
        #     ]
        # )
        # ec2.CfnSubnetRouteTableAssociation(
        #     self, "rtb_private_c_association",
        #     route_table_id=rtb_private_c.ref,
        #     subnet_id=private_subnet_c.ref
        # )
        # ec2.CfnRoute(
        #     self, "route_private_c",
        #     route_table_id=rtb_private_c.ref,
        #     destination_cidr_block="0.0.0.0/0",
        #     nat_gateway_id=natgw_c.ref
        # )

        # RouteTable of PublicSubnetA
        self.rtb_public_a = ec2.CfnRouteTable(
            self,
            "rtb_public_a",
            vpc_id=self.vpc.ref,
            tags=[core.CfnTag(key="Name", value=prefix + "rtb_public_a")])
        ec2.CfnSubnetRouteTableAssociation(
            self,
            "rtb_public_a_association",
            route_table_id=self.rtb_public_a.ref,
            subnet_id=self.public_subnet_a.ref)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            "rtb_public_c_association",
            route_table_id=self.rtb_public_a.ref,
            subnet_id=self.public_subnet_c.ref)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            "rtb_public_d_association",
            route_table_id=self.rtb_public_a.ref,
            subnet_id=self.public_subnet_d.ref)
        ec2.CfnRoute(self,
                     "route_public_a",
                     route_table_id=self.rtb_public_a.ref,
                     destination_cidr_block="0.0.0.0/0",
                     gateway_id=igw.ref)

        # RouteTable of PublicSubnetC
        # rtb_public_c = ec2.CfnRouteTable(
        #     self, "rtb_public_c",
        #     vpc_id=vpc.ref,
        #     tags=[
        #         core.CfnTag(key="Name", value=name("rtb_public_c"))
        #     ]
        # )
        # ec2.CfnSubnetRouteTableAssociation(
        #     self, "rtb_public_c_association",
        #     route_table_id=rtb_public_c.ref,
        #     subnet_id=public_subnet_c.ref
        # )
        # ec2.CfnRoute(
        #     self, "route_public_c",
        #     route_table_id=rtb_public_c.ref,
        #     destination_cidr_block="0.0.0.0/0",
        #     gateway_id=igw.ref
        # )

        ami_id = ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.
                                      AMAZON_LINUX_2).get_image(self).image_id

        # security_group = ec2.SecurityGroup(
        #     self,
        #     id='test',
        #     vpc=self.vpc,
        #     security_group_name='test-security-group'
        # )

        # security_group.add_ingress_rule(
        #     peer=ec2.Peer.ipv4(cidr),
        #     connection=ec2.Port.tcp(22),
        # )

        # red_web_inst = ec2.CfnInstance(self,
        #     "testInstance01",
        #     image_id = ami_id,
        #     instance_type = "t3a.micro",
        #     monitoring = False,
        #     key_name = "stg-intrinio-www01",
        #     security_group_ids=[security_group.security_group_id],
        #     block_device_mappings = [{
        #     "deviceName": "/dev/xvda",
        #     "ebs": {
        #         "volumeSize": 10,
        #         "volumeType": "io1",
        #         "iops": 150,
        #         "deleteOnTermination": True
        #             }
        #         }
        #     ],
        #     tags = [
        #         { "key": "Name", "value": prefix }
        #     ],
        #     network_interfaces = [{
        #         "deviceIndex": "0",
        #         "associatePublicIpAddress": True,
        #         "subnetId": self.public_subnet_a.ref,
        #         # "groupSet": [web_sg.security_group_id]
        #     }], #https: //github.com/aws/aws-cdk/issues/3419
        # )
        # RdsSecurityGroup
        RdsSecurityGroupStg = ec2.CfnSecurityGroup(
            self,
            "RdsSecurityGroupStg",
            group_name='stg-' + prefix + '-db01',
            group_description='stg-' + prefix + '-db01',
            vpc_id=self.vpc.ref,
            security_group_ingress=[{
                "ipProtocol": "tcp",
                "fromPort": 3306,
                "toPort": 3306,
                "cidrIp": "0.0.0.0/0"
            }],
            security_group_egress=[{
                "ipProtocol": "tcp",
                "fromPort": 0,
                "toPort": 65535,
                "cidrIp": "0.0.0.0/0"
            }],
        )
        # MyDBSubnetGroup
        rds_subnet_group = rds.CfnDBSubnetGroup(
            self,
            "DBSubnetGroup",
            db_subnet_group_description="DBSubnetGroup",
            subnet_ids=[
                self.public_subnet_a.ref, self.public_subnet_c.ref,
                self.public_subnet_d.ref
            ])
        DBParameterGroupStg = rds.CfnDBParameterGroup(
            self,
            "DBParameterGroupStg",
            description='stg-' + prefix + 'db01',
            family="MySQL5.6",
            parameters={
                'character_set_client': "utf8",
                'character_set_connection': "utf8",
                'character_set_database': "utf8",
                'character_set_results': "utf8",
                'character_set_server': "utf8",
                'collation_connection': "utf8_general_ci",
                'collation_server': "utf8_general_ci",
                'long_query_time': "1.2",
                'slow_query_log': "1",
                'time_zone': "Asia/Tokyo",
            },
            tags=[core.CfnTag(key="Name", value='stg-' + prefix + 'db01')])
        rds_params = {
            'db_instance_identifier': "stg-test-db01",
            'engine': "mysql",
            'engine_version': '5.6.39',
            'db_instance_class': 'db.t3.micro',
            'allocated_storage': '5',
            'storage_type': 'gp2',
            'db_name': "test",
            'master_username': "******",
            'master_user_password': "******",
            'db_subnet_group_name': rds_subnet_group.ref,
            'publicly_accessible': False,
            'multi_az': False,
            'preferred_backup_window': "18:00-18:30",
            'preferred_maintenance_window': "sat:19:00-sat:19:30",
            'auto_minor_version_upgrade': False,
            'db_parameter_group_name': DBParameterGroupStg.ref,
            'vpc_security_groups': [RdsSecurityGroupStg.ref],
            'copy_tags_to_snapshot': True,
            'backup_retention_period': 7,
            # 'enable_performance_insights': True,
            'delete_automated_backups': True,
            'deletion_protection': False,
            'availability_zone': "us-east-1a",
            'enable_cloudwatch_logs_exports': ["error", "slowquery"]
            # 'storage_encrypted': False,
        }

        self.rds = rds.CfnDBInstance(
            self,
            'staff-rds',
            **rds_params,
            tags=[core.CfnTag(key="Name", value='stg-' + prefix + 'db01')])

        core.CfnOutput(self, "OutputVpc", value=self.vpc.ref)
        core.CfnOutput(self, "OutputRds", value=self.rds.ref)
Example #18
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        self.lambda_sg = ec2.SecurityGroup(self, 'lambdasg',
            security_group_name='lambda-sg',
            vpc=vpc,
            description="SG for Lambda Functions",
            allow_all_outbound=True
        )
        
        self.bastion_sg = ec2.SecurityGroup(self, 'bastionsg',
            security_group_name='bastion-sg',
            vpc=vpc,
            description="SG for Bastion Host",
            allow_all_outbound=True
        )

        self.bastion_sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), "SSH Access")
        
        redis_sg = ec2.SecurityGroup(self, 'redissg',
            security_group_name='redis-sg',
            vpc=vpc,
            description="SG for Redis Cluster",
            allow_all_outbound=True
        )
        redis_sg.add_ingress_rule(self.lambda_sg, ec2.Port.tcp(6379), 'Access from Lambda functions')

        #Kibana
        self.kibana_sg = ec2.SecurityGroup(self, 'kibanasg',
            security_group_name='kibana-sg',
            vpc=vpc,
            description='SG for Kibana',
            allow_all_outbound=True
        )
        self.kibana_sg.add_ingress_rule(self.bastion_sg, ec2.Port.tcp(443), "Access from jumpbox")
        
        kibana_role = iam.CfnServiceLinkedRole(self,'kibanarole',
            aws_service_name="es.amazonaws.com"
        )


        lambda_role = iam.Role(self, 'lambdarole',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),
            role_name='lambda-role',
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='service-role/AWSLambdaVPCAccessExecutionRole'
            )]
        )
        
        lambda_role.add_to_policy(
            statement=iam.PolicyStatement(
                actions=['s3:*', 'rds:*'],
                resources=['*']
            )
        )
        

        core.CfnOutput(self, 'redis-export',
            export_name='redis-sg-export',
            value=redis_sg.security_group_id
        )
        #SSM Parameters
        ssm.StringParameter(self, 'lambdasg-param',
            parameter_name='/'+env_name+'/lambda-sg',
            string_value=self.lambda_sg.security_group_id
        )

        ssm.StringParameter(self, 'lambdarole-param-arn',
            parameter_name='/'+env_name+'/lambda-role-arn',
            string_value=lambda_role.role_arn
        )
        ssm.StringParameter(self, 'lambdarole-param-name',
            parameter_name='/'+env_name+'/lambda-role-name',
            string_value=lambda_role.role_name
        )
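        # Hedged consumer-side sketch: another stack could resolve these SSM
        # parameters at synth time (construct ids below are assumptions):
        #
        #   sg_id = ssm.StringParameter.value_for_string_parameter(
        #       self, '/' + env_name + '/lambda-sg')
        #   imported_sg = ec2.SecurityGroup.from_security_group_id(
        #       self, 'importedlambdasg', sg_id)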

        
        
Example #19
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        # Set up SSM parameters: credentials, bucket_para and ignore_list
        ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        ssm_bucket_para = ssm.StringParameter(self,
                                              "s3bucket_serverless",
                                              string_value=json.dumps(
                                                  bucket_para, indent=4))

        ssm_parameter_ignore_list = ssm.StringParameter(
            self, "s3_migrate_ignore_list", string_value=ignore_list)

        # Setup DynamoDB
        ddb_file_list = ddb.Table(self,
                                  "s3migrate_serverless",
                                  partition_key=ddb.Attribute(
                                      name="Key",
                                      type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        # Setup SQS
        sqs_queue_DLQ = sqs.Queue(self,
                                  "s3migrate_serverless_Q_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14))
        sqs_queue = sqs.Queue(self,
                              "s3migrate_serverless_Q",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=3, queue=sqs_queue_DLQ))

        # Set up an API so the Lambda can look up its own IP address (for debugging network routing)
        checkip = api.RestApi(
            self,
            "lambda-checkip-api",
            cloud_watch_role=True,
            deploy=True,
            description="For Lambda get IP address",
            default_integration=api.MockIntegration(
                integration_responses=[
                    api.IntegrationResponse(status_code="200",
                                            response_templates={
                                                "application/json":
                                                "$context.identity.sourceIp"
                                            })
                ],
                request_templates={"application/json": '{"statusCode": 200}'}),
            endpoint_types=[api.EndpointType.REGIONAL])
        checkip.root.add_method("GET",
                                method_responses=[
                                    api.MethodResponse(
                                        status_code="200",
                                        response_models={
                                            "application/json":
                                            api.Model.EMPTY_MODEL
                                        })
                                ])

        # Setup Lambda functions
        handler = lam.Function(self,
                               "s3-migrate-worker",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function_worker.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               tracing=lam.Tracing.ACTIVE,
                               environment={
                                   'table_queue_name':
                                   ddb_file_list.table_name,
                                   'Des_bucket_default':
                                   Des_bucket_default,
                                   'Des_prefix_default':
                                   Des_prefix_default,
                                   'StorageClass':
                                   StorageClass,
                                   'checkip_url':
                                   checkip.url,
                                   'ssm_parameter_credentials':
                                   ssm_parameter_credentials
                               })

        handler_jobsender = lam.Function(
            self,
            "s3-migrate-jobsender",
            code=lam.Code.asset("./lambda"),
            handler="lambda_function_jobsender.lambda_handler",
            runtime=lam.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.minutes(15),
            tracing=lam.Tracing.ACTIVE,
            environment={
                'table_queue_name': ddb_file_list.table_name,
                'StorageClass': StorageClass,
                'checkip_url': checkip.url,
                'sqs_queue': sqs_queue.queue_name,
                'ssm_parameter_credentials': ssm_parameter_credentials,
                'ssm_parameter_ignore_list':
                ssm_parameter_ignore_list.parameter_name,
                'ssm_parameter_bucket': ssm_bucket_para.parameter_name
            })

        # Allow the Lambda functions to read/write DynamoDB and SQS
        ddb_file_list.grant_read_write_data(handler)
        ddb_file_list.grant_read_write_data(handler_jobsender)
        sqs_queue.grant_send_messages(handler_jobsender)
        # SQS triggers the Lambda worker
        handler.add_event_source(SqsEventSource(sqs_queue, batch_size=1))

        # Option 1: Create an S3 bucket; all new objects in this bucket will be transmitted by the Lambda worker
        s3bucket = s3.Bucket(self, "s3_new_migrate")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # Option 2: Allow existing S3 buckets to be read by the Lambda functions.
        # The Lambda jobsender will scan and compare these buckets and trigger Lambda workers to transmit objects
        bucket_name = ''
        for b in bucket_para:
            if bucket_name != b['src_bucket']:  # skip duplicates if the same bucket is listed more than once
                bucket_name = b['src_bucket']
                s3exist_bucket = s3.Bucket.from_bucket_name(
                    self,
                    bucket_name,  # use the bucket name as the construct id
                    bucket_name=bucket_name)
                s3exist_bucket.grant_read(handler_jobsender)
                s3exist_bucket.grant_read(handler)

        # Allow Lambda to read the SSM parameters
        ssm_bucket_para.grant_read(handler_jobsender)
        ssm_credential_para.grant_read(handler)
        ssm_credential_para.grant_read(handler_jobsender)
        ssm_parameter_ignore_list.grant_read(handler_jobsender)

        # Schedule a cron event to trigger the Lambda jobsender every hour:
        event.Rule(self,
                   'cron_trigger_jobsender',
                   schedule=event.Schedule.rate(core.Duration.hours(1)),
                   targets=[target.LambdaFunction(handler_jobsender)])

        # Create Lambda log metric filters to track network traffic
        handler.log_group.add_metric_filter(
            "Complete-bytes",
            metric_name="Complete-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Complete", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Uploading-bytes",
            metric_name="Uploading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Uploading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Downloading-bytes",
            metric_name="Downloading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Downloading", bytes, key]'))
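        # Hedged illustration: the space-delimited patterns above match worker
        # log lines shaped like (sample values are invented):
        #     INFO 2020-01-01T00:00:00 job-001 --->Complete 1048576 prefix/key1
        # and publish the "bytes" field (here 1048576) as the metric value.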
        lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                           metric_name="Complete-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                         metric_name="Uploading-bytes",
                                         statistic="Sum",
                                         period=core.Duration.minutes(1))
        lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                           metric_name="Downloading-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        handler.log_group.add_metric_filter(
            "ERROR",
            metric_name="ERROR-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"ERROR"'))
        handler.log_group.add_metric_filter(
            "WARNING",
            metric_name="WARNING-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"WARNING"'))
        log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                     metric_name="ERROR-Logs",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
        log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                       metric_name="WARNING-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))

        # Dashboard to monitor SQS and Lambda
        board = cw.Dashboard(self, "s3_migrate_serverless")

        board.add_widgets(
            cw.GraphWidget(title="Lambda-NETWORK",
                           left=[
                               lambda_metric_Download, lambda_metric_Upload,
                               lambda_metric_Complete
                           ]),
            # TODO: this monitors concurrency across all Lambda functions, not just this worker (CDK limitation).
            # Lambda now supports per-function concurrency metrics; switch to that once CDK supports it.
            cw.GraphWidget(title="Lambda-all-concurrent",
                           left=[
                               handler.metric_all_concurrent_executions(
                                   period=core.Duration.minutes(1))
                           ]),
            cw.GraphWidget(
                title="Lambda-invocations/errors/throttles",
                left=[
                    handler.metric_invocations(
                        period=core.Duration.minutes(1)),
                    handler.metric_errors(period=core.Duration.minutes(1)),
                    handler.metric_throttles(period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="Lambda-duration",
                left=[
                    handler.metric_duration(period=core.Duration.minutes(1))
                ]),
        )

        board.add_widgets(
            cw.GraphWidget(
                title="SQS-Jobs",
                left=[
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="SQS-DeadLetterQueue",
                left=[
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(title="ERROR/WARNING Logs",
                           left=[log_metric_ERROR],
                           right=[log_metric_WARNING]),
            cw.SingleValueWidget(
                title="Running/Waiting and Dead Jobs",
                metrics=[
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1))
                ],
                height=6))
        # Alarm for queue - DLQ
        alarm_DLQ = cw.Alarm(
            self,
            "SQS_DLQ",
            metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
            ),
            threshold=0,
            comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
            evaluation_periods=1,
            datapoints_to_alarm=1)
        alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
        alarm_topic.add_subscription(
            subscription=sub.EmailSubscription(alarm_email))
        alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

        core.CfnOutput(self,
                       "Dashboard",
                       value="CloudWatch Dashboard name s3_migrate_serverless")
Example #20
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        memory: int = 1024,
        timeout: int = 30,
        concurrent: Optional[int] = None,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        layer_arn: Optional[str] = None,
        env: Optional[dict] = None,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        lambda_env = DEFAULT_ENV.copy()
        lambda_env.update(env or {})

        lambda_function = aws_lambda.Function(
            self,
            f"{id}-lambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=self.create_package(code_dir),
            handler="handler.handler",
            memory_size=memory,
            reserved_concurrent_executions=concurrent,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
        )

        # # If you use the DynamoDB mosaic backend you should add IAM permissions to read/put items and maybe create the table
        # permissions.append(
        #     iam.PolicyStatement(
        #         actions=[
        #             "dynamodb:GetItem",
        #             "dynamodb:PutItem",
        #             "dynamodb:CreateTable",
        #             "dynamodb:Scan",
        #             "dynamodb:BatchWriteItem",
        #         ],
        #         resources=[f"arn:aws:dynamodb:{self.region}:{self.account}:table/*"],
        #     )
        # )

        for perm in permissions:
            lambda_function.add_to_role_policy(perm)

        if layer_arn:
            lambda_function.add_layers(
                aws_lambda.LayerVersion.from_layer_version_arn(
                    self,
                    layer_arn.split(":")[-2], layer_arn))

        api = apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw.LambdaProxyIntegration(
                handler=lambda_function),
        )
        core.CfnOutput(self, "Endpoint", value=api.url)
Example #21
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 ecr_repository: ecr.Repository,
                 ecs_service: ecs.FargateService,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        backend_repository = codecommit.Repository(
            self, 'BackendRepository',
            repository_name='MythicalMysfits-BackendRepository'
        )

        codebuild_project = codebuild.PipelineProject(
            self, 'BuildProject',
            project_name='MythicalMysfitsServiceCodeBuildProject',
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.UBUNTU_14_04_PYTHON_3_5_2,
                compute_type=codebuild.ComputeType.SMALL,
                environment_variables={
                    'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable(
                        type=codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                        value=self.account),
                    'AWS_DEFAULT_REGION': codebuild.BuildEnvironmentVariable(
                        type=codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                        value=self.region),
                },
                privileged=True
            )
        )

        codebuild_policy_stm = _iam.PolicyStatement()
        codebuild_policy_stm.add_resources(backend_repository.repository_arn)
        codebuild_policy_stm.add_actions(
            "codecommit:ListBranches",
            "codecommit:ListRepositories",
            "codecommit:BatchGetRepositories",
            "codecommit:GitPull"
        )
        codebuild_project.add_to_role_policy(codebuild_policy_stm)

        ecr_repository.grant_pull_push(codebuild_project.grant_principal)

        source_output = codepipeline.Artifact()
        source_action = actions.CodeCommitSourceAction(
            action_name='CodeCommit-Source',
            branch='main',
            trigger=actions.CodeCommitTrigger.EVENTS,
            repository=backend_repository,
            output=source_output
        )

        build_output = codepipeline.Artifact()
        build_action = actions.CodeBuildAction(
            action_name='Build',
            input=source_output,
            outputs=[
                build_output
            ],
            project=codebuild_project
        )

        deploy_action = actions.EcsDeployAction(
            action_name='DeployAction',
            service=ecs_service,
            input=build_output
        )

        pipeline = codepipeline.Pipeline(
            self, 'Pipeline',
            pipeline_name='MythicalMysfitsPipeline',
        )
        pipeline.add_stage(stage_name='Source', actions=[source_action])
        pipeline.add_stage(stage_name='Build', actions=[build_action])
        # # the following pipeline.add_stage doesn't work
        # pipeline.add_stage(stage_name='Deploy', actions=[deploy_action])

        cdk.CfnOutput(self, 'BackendRepositoryCloneUrlHttp',
                      description='Backend Repository CloneUrl HTTP',
                      value=backend_repository.repository_clone_url_http)

        cdk.CfnOutput(self, 'BackendRepositoryCloneUrlSsh',
                      description='Backend Repository CloneUrl SSH',
                      value=backend_repository.repository_clone_url_ssh)
Example #22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        lambda_policies = [iam.PolicyStatement(
                actions=[ "logs:CreateLogStream", "logs:PutLogEvents", "logs:CreateLogGroup"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:logs:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            ), iam.PolicyStatement(
                actions=[ "dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:dynamodb:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            )]
            
        base_api = _apigw.RestApi(self, 'PetclinicApiGatewayWithCors',
            rest_api_name='PetclinicApiGatewayWithCors')
            
        api_resource = base_api.root.add_resource('api')
        
        website_bucket = _s3.Bucket(self, 'PetclinicWebsite',
            website_index_document='index.html',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        
        deployment = _s3deploy.BucketDeployment(self, 'PetclinicDeployWebsite',
          sources=[_s3deploy.Source.asset('./spring-petclinic-static')],
          destination_bucket=website_bucket,
          retain_on_delete=False
          #destination_key_prefix='web/static'
        )
        
        # Modify the config.js with a CloudFormation custom resource
        modify_policy = [iam.PolicyStatement(
                actions=[ "s3:PutObject","s3:PutObjectAcl","s3:PutObjectVersionAcl","s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[website_bucket.bucket_arn + "/*"]
            ),iam.PolicyStatement(
                actions=[ "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[website_bucket.bucket_arn]
            ),iam.PolicyStatement(
                actions=[ "dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:dynamodb:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            )]
            
        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()
        
        dynamodb_tables = []
        
        # Event rule to keep the Lambda functions warm
        event_rule = _event.Rule(self, 'PetclinicLambdaWarmRule',
            schedule=_event.Schedule.rate(core.Duration.minutes(3))
        )
        
        for service in ['customer', 'vet', 'visit']:
            table = _dynamodb.Table(self, service.capitalize() + 'Table',
              partition_key={ 'name': 'id', 'type': _dynamodb.AttributeType.STRING },
              removal_policy=core.RemovalPolicy.DESTROY,
              read_capacity=5,
              write_capacity=5,
            )
            
            dynamodb_tables.append(table.table_name)
                
            base_lambda = _lambda.Function(self,'ApiPetclinic' + service.capitalize() + 'Lambda',
                handler='org.springframework.samples.petclinic.' + service + 's.StreamLambdaHandler::handleRequest',
                runtime=_lambda.Runtime.JAVA_8,
                code=_lambda.Code.asset('./spring-petclinic-serverless/spring-petclinic-' + service +'s-serverless/target/spring-petclinic-' + service +'s-serverless-2.0.7.jar'),
                memory_size=1024,
                timeout=core.Duration.seconds(300),
                initial_policy=lambda_policies,
                environment={"DYNAMODB_TABLE_NAME":table.table_name, "SERVER_SERVLET_CONTEXT_PATH":"/api/" + service}
            )
        
            entity = api_resource.add_resource(service)
            entity.add_proxy(default_integration=_apigw.LambdaIntegration(base_lambda))
            self.add_cors_options(entity)
            event_rule.add_target(_target.LambdaFunction(handler=base_lambda))
            
        resource = _cfn.CustomResource(self, "S3ModifyCustomResource",
            provider=_cfn.CustomResourceProvider.lambda_(
                _lambda.SingletonFunction(
                    self, "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=_lambda.InlineCode(code_body),
                    handler="index.handler",
                    timeout=core.Duration.seconds(300),
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    initial_policy=modify_policy
                )
            ),
            properties={"Bucket": website_bucket.bucket_name, 
                        "InvokeUrl":base_api.url,
                        "DynamoDBTables": dynamodb_tables
            }
        )
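        # Hedged sketch of what "custom-resource-code/init.py" minimally needs:
        # a handler that reports success back to CloudFormation (the body below
        # is an assumption about that external file, not its actual contents):
        #
        #   import cfnresponse
        #   def handler(event, context):
        #       # rewrite config.js in the website bucket here, then:
        #       cfnresponse.send(event, context, cfnresponse.SUCCESS, {})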
        
        core.CfnOutput(self,"PetclinicWebsiteUrl",export_name="PetclinicWebsiteUrl",value=website_bucket.bucket_website_url)
Example #23
    def __init__(self, scope: core.Construct, id: str, config_dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        """ Create the datalake database """
        createDatalakeDB = glue.Database(
            self,
            "createDatalakeDB",
            database_name=config_dict['datalake_db_name'])

        core.CfnOutput(self,
                       "createDatalakeDBName",
                       value=createDatalakeDB.database_name)
        """ Create Comp Reg Table """

        createDatalakeCompRegTable = glue.Table(
            self,
            "createDatalakeCompRegTable",
            columns=[
                glue.Column(name="lot_compound_id",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="version_id",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="parent_id",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="smiles",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="parent_mw",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="salt_multiplicity",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="salt_name",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="formula_weight",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="parent_alias",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="stereochemistry",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="stereocomment",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="geometric_isomerism",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="parent_comment",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="parent_project",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="elnref",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="msmethod",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="msmass",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="provider",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="purity",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="puritymethod",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="nmrshifts",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="lotalias",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="lot_comment",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="lot_project",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="molfile",
                            type=glue.Type(input_string="string",
                                           is_primitive=True)),
                glue.Column(name="checksum",
                            type=glue.Type(input_string="string",
                                           is_primitive=True))
            ],
            database=createDatalakeDB,
            data_format=glue.DataFormat(
                input_format=glue.InputFormat.PARQUET,
                output_format=glue.OutputFormat.PARQUET,
                serialization_library=glue.SerializationLibrary.PARQUET),
            table_name="tbl_compound_data",
            bucket=s3.Bucket.from_bucket_name(
                self,
                "getIBucket",
                bucket_name=config_dict['datalake_bucket_name']),
            compressed=True,
            description="This table contains data regarding compound registration coming from RDS",
            partition_keys=[
                glue.Column(name="dt",
                            type=glue.Type(input_string="string",
                                           is_primitive=True))
            ],
            s3_prefix="compound_reg/compound_data/")

        core.CfnOutput(self,
                       "createDatalakeCompRegTableName",
                       value=createDatalakeCompRegTable.table_name)
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vahalla_group = Group(self, "Vahalla Project")

        dev01 = User(self, "Vahalla Developer")
        dev01.add_to_group(vahalla_group)

        # Add AWS managed policy for EC2 Read Only access for the console.
        vahalla_group.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"))

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "VahallaProjectPolicy",
            policy_document=json.loads(project_specific_tags),
            groups=[vahalla_group.group_name],
        )
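        # "project_specific_tags" is defined elsewhere in this project; a hedged
        # sketch of the kind of JSON policy it might hold (illustrative only):
        #
        #   {
        #     "Version": "2012-10-17",
        #     "Statement": [{
        #       "Effect": "Allow",
        #       "Action": "ec2:RunInstances",
        #       "Resource": "*",
        #       "Condition": {"StringEquals": {"aws:RequestTag/project": "vahalla"}}
        #     }]
        #   }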

        vpc = Vpc.from_lookup(self, "Default VPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of 1.31
        vahalla = core.CfnTag(key="project", value="vahalla")
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[vahalla],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(self,
                                            "EmptySecurityGroup",
                                            vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-sg",
        )
        core.CfnOutput(self,
                       "DefaultAMI",
                       value=image_id,
                       export_name="default-ami")
Example #25
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Version of ParallelCluster for Cloud9.
        pcluster_version = cdk.CfnParameter(
            self,
            'ParallelClusterVersion',
            description=
            'Specify a custom parallelcluster version. See https://pypi.org/project/aws-parallelcluster/#history for options.',
            default='2.8.0',
            type='String',
            allowed_values=get_version_list('aws-parallelcluster'))

        # S3 URI for Config file
        config = cdk.CfnParameter(
            self,
            'ConfigS3URI',
            description='Set a custom parallelcluster config file.',
            default='https://notearshpc-quickstart.s3.amazonaws.com/{0}/config.ini'.format(
                __version__))

        # Password
        password = cdk.CfnParameter(
            self,
            'UserPasswordParameter',
            description='Set a password for the hpc-quickstart user',
            no_echo=True)

        # create a VPC
        vpc = ec2.Vpc(
            self,
            'VPC',
            cidr='10.0.0.0/16',
            gateway_endpoints={
                "S3":
                ec2.GatewayVpcEndpointOptions(
                    service=ec2.GatewayVpcEndpointAwsService.S3),
                "DynamoDB":
                ec2.GatewayVpcEndpointOptions(
                    service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)
            },
            max_azs=99)

        # select the private subnets created in the VPC
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

        # Output created subnets
        for i, public_subnet in enumerate(vpc.public_subnets):
            cdk.CfnOutput(self,
                          'PublicSubnet%i' % i,
                          value=public_subnet.subnet_id)

        for i, private_subnet in enumerate(vpc.private_subnets):
            cdk.CfnOutput(self,
                          'PrivateSubnet%i' % i,
                          value=private_subnet.subnet_id)

        cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

        # Create a Bucket
        data_bucket = s3.Bucket(self, "DataRepository")
        cdk.CfnOutput(self, 'DataRespository', value=data_bucket.bucket_name)
        cloudtrail_bucket = s3.Bucket(self, "CloudTrailLogs")
        quickstart_bucket = s3.Bucket.from_bucket_name(self,
                                                       'QuickStartBucket',
                                                       'aws-quickstart')

        # Upload Bootstrap Script to that bucket
        bootstrap_script = assets.Asset(self,
                                        'BootstrapScript',
                                        path='scripts/bootstrap.sh')

        # Upload parallel cluster post_install_script to that bucket
        pcluster_post_install_script = assets.Asset(
            self,
            'PclusterPostInstallScript',
            path='scripts/post_install_script.sh')

        # Upload parallel cluster config to that bucket
        pcluster_config_script = assets.Asset(self,
                                              'PclusterConfigScript',
                                              path='scripts/config.ini')

        # Setup CloudTrail
        cloudtrail.Trail(self, 'CloudTrail', bucket=cloudtrail_bucket)

        # Create a Cloud9 instance
        # Cloud9 doesn't have the ability to provide userdata
        # Because of this we need to use SSM run command
        cloud9_instance = cloud9.Ec2Environment(
            self,
            'ResearchWorkspace',
            vpc=vpc,
            instance_type=ec2.InstanceType(
                instance_type_identifier='c5.large'))
        cdk.CfnOutput(self,
                      'Research Workspace URL',
                      value=cloud9_instance.ide_url)

        # Create a keypair in lambda and store the private key in SecretsManager
        c9_createkeypair_role = iam.Role(
            self,
            'Cloud9CreateKeypairRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        c9_createkeypair_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        # Add IAM permissions to the lambda role
        c9_createkeypair_role.add_to_policy(
            iam.PolicyStatement(
                actions=['ec2:CreateKeyPair', 'ec2:DeleteKeyPair'],
                resources=['*'],
            ))

        # Lambda for Cloud9 keypair
        c9_createkeypair_lambda = _lambda.Function(
            self,
            'C9CreateKeyPairLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(300),
            role=c9_createkeypair_role,
            code=_lambda.Code.asset('functions/source/c9keypair'),
        )

        c9_createkeypair_provider = cr.Provider(
            self,
            "C9CreateKeyPairProvider",
            on_event_handler=c9_createkeypair_lambda)

        c9_createkeypair_cr = cfn.CustomResource(
            self,
            "C9CreateKeyPair",
            provider=c9_createkeypair_provider,
            properties={'ServiceToken': c9_createkeypair_lambda.function_arn})
        #c9_createkeypair_cr.node.add_dependency(instance_id)
        c9_ssh_private_key_secret = secretsmanager.CfnSecret(
            self,
            'SshPrivateKeySecret',
            secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))
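        # Hedged retrieval sketch: once deployed, the private key could be read
        # back with boto3 (the SecretId value is a placeholder):
        #
        #   import boto3
        #   sm = boto3.client('secretsmanager')
        #   key = sm.get_secret_value(
        #       SecretId='<SshPrivateKeySecret ARN>')['SecretString']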

        # The iam policy has a <REGION> parameter that needs to be replaced.
        # We do it programmatically so future versions of the synth'd stack
        # template include all regions.
        with open('iam/ParallelClusterUserPolicy.json') as json_file:
            data = json.load(json_file)
            for s in data['Statement']:
                if s['Sid'] == 'S3ParallelClusterReadOnly':
                    s['Resource'] = []
                    for r in region_info.RegionInfo.regions:
                        s['Resource'].append(
                            'arn:aws:s3:::{0}-aws-parallelcluster*'.format(
                                r.name))

            parallelcluster_user_policy = iam.CfnManagedPolicy(
                self,
                'ParallelClusterUserPolicy',
                policy_document=iam.PolicyDocument.from_json(data))

        # Cloud9 IAM Role
        cloud9_role = iam.Role(
            self,
            'Cloud9Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self, 'AttachParallelClusterUserPolicy',
                parallelcluster_user_policy.ref))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(resources=['*'],
                                actions=[
                                    'ec2:DescribeInstances',
                                    'ec2:DescribeVolumes', 'ec2:ModifyVolume'
                                ]))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(resources=[c9_ssh_private_key_secret.ref],
                                actions=['secretsmanager:GetSecretValue']))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(
                actions=["s3:Get*", "s3:List*"],
                resources=[
                    "arn:aws:s3:::%s/*" % (data_bucket.bucket_name),
                    "arn:aws:s3:::%s" % (data_bucket.bucket_name)
                ]))

        bootstrap_script.grant_read(cloud9_role)
        pcluster_post_install_script.grant_read(cloud9_role)
        pcluster_config_script.grant_read(cloud9_role)

        # Admin Group
        admin_group = iam.Group(self, 'AdminGroup')
        admin_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AdministratorAccess'))
        admin_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSCloud9Administrator'))

        # PowerUser Group
        poweruser_group = iam.Group(self, 'PowerUserGroup')
        poweruser_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'))
        poweruser_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSCloud9Administrator'))

        # HPC User
        user = iam.CfnUser(
            self,
            'Researcher',
            groups=[admin_group.node.default_child.ref],
            login_profile=iam.CfnUser.LoginProfileProperty(
                password_reset_required=True,
                password=cdk.SecretValue.cfn_parameter(password).to_string()))

        create_user = cdk.CfnParameter(self,
                                       "CreateUser",
                                       default="false",
                                       type="String",
                                       allowed_values=['true', 'false'
                                                       ]).value_as_string
        user_condition = cdk.CfnCondition(self,
                                          "UserCondition",
                                          expression=cdk.Fn.condition_equals(
                                              create_user, "true"))
        user.cfn_options.condition = user_condition

        cdk.CfnOutput(self,
                      'UserLoginUrl',
                      value="".join([
                          "https://", self.account,
                          ".signin.aws.amazon.com/console"
                      ]),
                      condition=user_condition)
        cdk.CfnOutput(self,
                      'UserName',
                      value=user.ref,
                      condition=user_condition)

        # Cloud9 Setup IAM Role
        cloud9_setup_role = iam.Role(
            self,
            'Cloud9SetupRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        cloud9_setup_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        # Allow pcluster to be run in bootstrap
        cloud9_setup_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self, 'AttachParallelClusterUserPolicySetup',
                parallelcluster_user_policy.ref))

        # Add IAM permissions to the lambda role
        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'cloudformation:DescribeStackResources',
                    'ec2:AssociateIamInstanceProfile',
                    'ec2:AuthorizeSecurityGroupIngress',
                    'ec2:DescribeInstances',
                    'ec2:DescribeInstanceStatus',
                    'ec2:DescribeInstanceAttribute',
                    'ec2:DescribeIamInstanceProfileAssociations',
                    'ec2:DescribeVolumes',
                    'ec2:DescribeVolumeAttribute',
                    'ec2:DescribeVolumesModifications',
                    'ec2:DescribeVolumeStatus',
                    'ssm:DescribeInstanceInformation',
                    'ec2:ModifyVolume',
                    'ec2:ReplaceIamInstanceProfileAssociation',
                    'ec2:ReportInstanceStatus',
                    'ssm:SendCommand',
                    'ssm:GetCommandInvocation',
                    's3:GetObject',
                    'lambda:AddPermission',
                    'lambda:RemovePermission',
                    'events:PutRule',
                    'events:DeleteRule',
                    'events:PutTargets',
                    'events:RemoveTargets',
                    'cloud9:CreateEnvironmentMembership',
                ],
                resources=['*'],
            ))

        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(actions=['iam:PassRole'],
                                resources=[cloud9_role.role_arn]))

        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(
                actions=['lambda:AddPermission', 'lambda:RemovePermission'],
                resources=['*']))

        # Cloud9 Instance Profile
        c9_instance_profile = iam.CfnInstanceProfile(
            self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

        # Lambda to add Instance Profile to Cloud9
        c9_instance_profile_lambda = _lambda.Function(
            self,
            'C9InstanceProfileLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
        )

        c9_instance_profile_provider = cr.Provider(
            self,
            "C9InstanceProfileProvider",
            on_event_handler=c9_instance_profile_lambda,
        )

        instance_id = cfn.CustomResource(self,
                                         "C9InstanceProfile",
                                         provider=c9_instance_profile_provider,
                                         properties={
                                             'InstanceProfile':
                                             c9_instance_profile.ref,
                                             'Cloud9Environment':
                                             cloud9_instance.environment_id,
                                         })
        instance_id.node.add_dependency(cloud9_instance)

        # Lambda for Cloud9 Bootstrap
        c9_bootstrap_lambda = _lambda.Function(
            self,
            'C9BootstrapLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9bootstrap'),
        )

        c9_bootstrap_provider = cr.Provider(
            self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)

        c9_bootstrap_cr = cfn.CustomResource(
            self,
            "C9Bootstrap",
            provider=c9_bootstrap_provider,
            properties={
                'Cloud9Environment':
                cloud9_instance.environment_id,
                'BootstrapPath':
                's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                                bootstrap_script.s3_object_key),
                'Config':
                config.value_as_string,
                'VPCID':
                vpc.vpc_id,
                'MasterSubnetID':
                vpc.public_subnets[0].subnet_id,
                'ComputeSubnetID':
                vpc.private_subnets[0].subnet_id,
                'PostInstallScriptS3Url':
                "".join([
                    's3://', pcluster_post_install_script.s3_bucket_name, "/",
                    pcluster_post_install_script.s3_object_key
                ]),
                'PostInstallScriptBucket':
                pcluster_post_install_script.s3_bucket_name,
                'S3ReadWriteResource':
                data_bucket.bucket_arn,
                'S3ReadWriteUrl':
                's3://%s' % (data_bucket.bucket_name),
                'KeyPairId':
                c9_createkeypair_cr.ref,
                'KeyPairSecretArn':
                c9_ssh_private_key_secret.ref,
                'UserArn':
                user.attr_arn,
                'PclusterVersion':
                pcluster_version.value_as_string
            })
        c9_bootstrap_cr.node.add_dependency(instance_id)
        c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
        c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
        c9_bootstrap_cr.node.add_dependency(data_bucket)

        enable_budget = cdk.CfnParameter(self,
                                         "EnableBudget",
                                         default="true",
                                         type="String",
                                         allowed_values=['true', 'false'
                                                         ]).value_as_string
        # Budgets
        budget_properties = {
            'budgetType': "COST",
            'timeUnit': "ANNUALLY",
            'budgetLimit': {
                'amount':
                cdk.CfnParameter(
                    self,
                    'BudgetLimit',
                    description=
                    'The initial budget for this project in USD ($).',
                    default=2000,
                    type='Number').value_as_number,
                'unit':
                "USD",
            },
            'costFilters': None,
            'costTypes': {
                'includeCredit': False,
                'includeDiscount': True,
                'includeOtherSubscription': True,
                'includeRecurring': True,
                'includeRefund': True,
                'includeSubscription': True,
                'includeSupport': True,
                'includeTax': True,
                'includeUpfront': True,
                'useAmortized': False,
                'useBlended': False,
            },
            'plannedBudgetLimits': None,
            'timePeriod': None,
        }

        email = {
            'notification': {
                'comparisonOperator': "GREATER_THAN",
                'notificationType': "ACTUAL",
                'threshold': 80,
                'thresholdType': "PERCENTAGE",
            },
            'subscribers': [{
                'address':
                cdk.CfnParameter(
                    self,
                    'NotificationEmail',
                    description=
                    'This email address will receive billing alarm notifications when 80% of the budget limit is reached.',
                    default='*****@*****.**').value_as_string,
                'subscriptionType':
                "EMAIL",
            }]
        }

        overall_budget = budgets.CfnBudget(
            self,
            "HPCBudget",
            budget=budget_properties,
            notifications_with_subscribers=[email],
        )
        overall_budget.cfn_options.condition = cdk.CfnCondition(
            self,
            "BudgetCondition",
            expression=cdk.Fn.condition_equals(enable_budget, "true"))
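        # Hedged sketch (not in the original stack): a second, forecast-based
        # alert could be added by passing both notification dicts to the
        # budget, e.g. notifications_with_subscribers=[email, forecast_alert],
        # where forecast_alert reuses the shapes defined above:
        forecast_alert = {
            'notification': {
                'comparisonOperator': "GREATER_THAN",
                'notificationType': "FORECASTED",
                'threshold': 100,
                'thresholdType': "PERCENTAGE",
            },
            # Reuse the subscribers wired up for the actual-spend alert.
            'subscribers': email['subscribers'],
        }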
Example n. 26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        current_directory = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        allowed_values_path = os.path.join(current_directory, "..", "..",
                                           "allowed_values.yaml")
        with open(allowed_values_path) as f:
            allowed_values = yaml.load(f, Loader=yaml.SafeLoader)
        ami_mapping = {"AMI": {"OEJITSI": AMI_NAME}}
        for region in generated_ami_ids.keys():
            ami_mapping[region] = {"OEJITSI": generated_ami_ids[region]}
        aws_ami_region_map = core.CfnMapping(self,
                                             "AWSAMIRegionMap",
                                             mapping=ami_mapping)

        # Utility function to parse the unique id from the stack id, for
        # shorter resource names, using CloudFormation intrinsic functions
        def append_stack_uuid(name):
            return core.Fn.join("-", [
                name,
                core.Fn.select(
                    0,
                    core.Fn.split(
                        "-",
                        core.Fn.select(2, core.Fn.split(
                            "/", core.Aws.STACK_ID))))
            ])
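        # Hedged usage sketch (this output is added for illustration only):
        # the helper yields deploy-time names like "jitsi-a1b2c3d4", since
        # the stack ID ends in ".../<stack-name>/<uuid>" and the first
        # "-"-delimited token of that uuid is selected.
        core.CfnOutput(self, "StackUuidExample",
                       value=append_stack_uuid("jitsi"))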

        #
        # PARAMETERS
        #

        cidr_block_param = core.CfnParameter(
            self,
            "IngressCidrBlock",
            allowed_pattern=r"((\d{1,3})\.){3}\d{1,3}/\d{1,2}",
            default="0.0.0.0/0",
            description=
            "Required: A CIDR block to restrict access to the Jitsi application. Leave as 0.0.0.0/0 to allow public access from the internet."
        )
        ec2_instance_type_param = core.CfnParameter(
            self,
            "InstanceType",
            allowed_values=allowed_values["allowed_instance_types"],
            default="t3.xlarge",
            description=
            "Required: The EC2 instance type for the application Auto Scaling Group."
        )
        jitsi_hostname_param = core.CfnParameter(
            self,
            "JitsiHostname",
            description=
            "Required: The hostname to access Jitsi. E.G. 'jitsi.internal.mycompany.com'"
        )
        jitsi_interface_app_name_param = core.CfnParameter(
            self,
            "JitsiInterfaceAppName",
            default="Jitsi Meet",
            description=
            "Optional: Customize the app name on the Jitsi interface.")
        jitsi_interface_default_remote_display_name_param = core.CfnParameter(
            self,
            "JitsiInterfaceDefaultRemoteDisplayName",
            default="Fellow Jitster",
            description=
            "Optional: Customize the default display name for Jitsi users.")
        jitsi_interface_native_app_name_param = core.CfnParameter(
            self,
            "JitsiInterfaceNativeAppName",
            default="Jitsi Meet",
            description=
            "Optional: Customize the native app name on the Jitsi interface.")
        jitsi_interface_show_brand_watermark_param = core.CfnParameter(
            self,
            "JitsiInterfaceShowBrandWatermark",
            allowed_values=["true", "false"],
            default="true",
            description=
            "Optional: Display the watermark logo image in the upper left corner."
        )
        jitsi_interface_show_watermark_for_guests_param = core.CfnParameter(
            self,
            "JitsiInterfaceShowWatermarkForGuests",
            allowed_values=["true", "false"],
            default="true",
            description=
            "Optional: Display the watermark logo image in the upper left corner for guest users. This can be set to override the general setting behavior for guest users."
        )
        jitsi_interface_brand_watermark_param = core.CfnParameter(
            self,
            "JitsiInterfaceBrandWatermark",
            default="",
            description=
            "Optional: Provide a URL to a PNG image to be used as the brand watermark logo image in the upper right corner. File should be publically available for download."
        )
        jitsi_interface_brand_watermark_link_param = core.CfnParameter(
            self,
            "JitsiInterfaceBrandWatermarkLink",
            default="http://jitsi.org",
            description=
            "Optional: Provide a link destination for the brand watermark logo image in the upper right corner."
        )
        jitsi_interface_watermark_param = core.CfnParameter(
            self,
            "JitsiInterfaceWatermark",
            default="",
            description=
            "Optional: Provide a URL to a PNG image to be used as the watermark logo image in the upper left corner. File should be publically available for download."
        )
        jitsi_interface_watermark_link_param = core.CfnParameter(
            self,
            "JitsiInterfaceWatermarkLink",
            default="http://jitsi.org",
            description=
            "Optional: Provide a link destination for the Jitsi watermark logo image in the upper left corner."
        )
        route_53_hosted_zone_name_param = core.CfnParameter(
            self,
            "Route53HostedZoneName",
            description=
            "Required: Route 53 Hosted Zone name in which a DNS record will be created by this template. Must already exist and be the domain part of the Jitsi Hostname parameter, without trailing dot. E.G. 'internal.mycompany.com'"
        )
        notification_email_param = core.CfnParameter(
            self,
            "NotificationEmail",
            default="",
            description=
            "Optional: Specify an email address to get emails about deploys, Let's Encrypt, and other system events."
        )

        #
        # CONDITIONS
        #

        notification_email_exists_condition = core.CfnCondition(
            self,
            "NotificationEmailExistsCondition",
            expression=core.Fn.condition_not(
                core.Fn.condition_equals(notification_email_param.value, "")))

        #
        # RESOURCES
        #

        # vpc
        vpc = Vpc(self, "Vpc")

        # sns
        sns_notification_topic = aws_sns.CfnTopic(
            self,
            "NotificationTopic",
            topic_name="{}-notifications".format(core.Aws.STACK_NAME))
        sns_notification_subscription = aws_sns.CfnSubscription(
            self,
            "NotificationSubscription",
            protocol="email",
            topic_arn=sns_notification_topic.ref,
            endpoint=notification_email_param.value_as_string)
        sns_notification_subscription.cfn_options.condition = notification_email_exists_condition
        iam_notification_publish_policy = aws_iam.PolicyDocument(statements=[
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["sns:Publish"],
                                    resources=[sns_notification_topic.ref])
        ])

        # cloudwatch
        app_log_group = aws_logs.CfnLogGroup(
            self, "JitsiAppLogGroup", retention_in_days=TWO_YEARS_IN_DAYS)
        app_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN
        app_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN
        system_log_group = aws_logs.CfnLogGroup(
            self, "JitsiSystemLogGroup", retention_in_days=TWO_YEARS_IN_DAYS)
        system_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN
        system_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # iam
        iam_jitsi_instance_role = aws_iam.CfnRole(
            self,
            "JitsiInstanceRole",
            assume_role_policy_document=aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=["sts:AssumeRole"],
                    principals=[aws_iam.ServicePrincipal("ec2.amazonaws.com")])
            ]),
            policies=[
                aws_iam.CfnRole.PolicyProperty(
                    policy_document=aws_iam.PolicyDocument(statements=[
                        aws_iam.PolicyStatement(
                            effect=aws_iam.Effect.ALLOW,
                            actions=[
                                "logs:CreateLogStream",
                                "logs:DescribeLogStreams", "logs:PutLogEvents"
                            ],
                            resources=[
                                app_log_group.attr_arn,
                                system_log_group.attr_arn
                            ])
                    ]),
                    policy_name="AllowStreamLogsToCloudWatch"),
                aws_iam.CfnRole.PolicyProperty(
                    policy_document=aws_iam.PolicyDocument(statements=[
                        aws_iam.PolicyStatement(
                            effect=aws_iam.Effect.ALLOW,
                            actions=[
                                "ec2:AssociateAddress", "ec2:DescribeVolumes",
                                "ec2:DescribeTags",
                                "cloudwatch:GetMetricStatistics",
                                "cloudwatch:ListMetrics",
                                "cloudwatch:PutMetricData"
                            ],
                            resources=["*"])
                    ]),
                    policy_name="AllowStreamMetricsToCloudWatch"),
                aws_iam.CfnRole.PolicyProperty(
                    policy_document=aws_iam.PolicyDocument(statements=[
                        aws_iam.PolicyStatement(
                            effect=aws_iam.Effect.ALLOW,
                            actions=["autoscaling:Describe*"],
                            resources=["*"])
                    ]),
                    policy_name="AllowDescribeAutoScaling"),
            ],
            managed_policy_arns=[
                "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
            ])

        # ec2
        jitsi_sg = aws_ec2.CfnSecurityGroup(
            self,
            "JitsiSg",
            group_description="Jitsi security group",
            vpc_id=vpc.id())

        eip = aws_ec2.CfnEIP(self, "Eip", domain="vpc")
        core.Tags.of(eip).add("Name", "{}/Eip".format(core.Aws.STACK_NAME))

        ec2_instance_profile = aws_iam.CfnInstanceProfile(
            self, "JitsiInstanceProfile", roles=[iam_jitsi_instance_role.ref])
        with open("jitsi/jitsi_launch_config_user_data.sh") as f:
            jitsi_launch_config_user_data = f.read()
        ec2_launch_config = aws_autoscaling.CfnLaunchConfiguration(
            self,
            "JitsiLaunchConfig",
            image_id=core.Fn.find_in_map("AWSAMIRegionMap", core.Aws.REGION,
                                         "OEJITSI"),
            instance_type=ec2_instance_type_param.value_as_string,
            iam_instance_profile=ec2_instance_profile.ref,
            security_groups=[jitsi_sg.ref],
            user_data=(core.Fn.base64(
                core.Fn.sub(
                    jitsi_launch_config_user_data, {
                        "JitsiHostname":
                        jitsi_hostname_param.value_as_string,
                        "JitsiPublicIP":
                        eip.ref,
                        "LetsEncryptCertificateEmail":
                        notification_email_param.value_as_string
                    }))))

        # autoscaling
        asg = aws_autoscaling.CfnAutoScalingGroup(
            self,
            "JitsiAsg",
            launch_configuration_name=ec2_launch_config.ref,
            desired_capacity="1",
            max_size="1",
            min_size="1",
            vpc_zone_identifier=vpc.public_subnet_ids())
        asg.cfn_options.creation_policy = core.CfnCreationPolicy(
            resource_signal=core.CfnResourceSignal(count=1, timeout="PT15M"))
        asg.cfn_options.update_policy = core.CfnUpdatePolicy(
            auto_scaling_rolling_update=core.CfnAutoScalingRollingUpdate(
                max_batch_size=1,
                min_instances_in_service=0,
                pause_time="PT15M",
                wait_on_resource_signals=True))
        core.Tags.of(asg).add("Name",
                              "{}/JitsiAsg".format(core.Aws.STACK_NAME))

        jitsi_http_ingress = aws_ec2.CfnSecurityGroupIngress(
            self,
            "JitsiHttpSgIngress",
            cidr_ip=cidr_block_param.value_as_string,
            from_port=80,
            group_id=jitsi_sg.ref,
            ip_protocol="tcp",
            to_port=80)
        jitsi_https_ingress = aws_ec2.CfnSecurityGroupIngress(
            self,
            "JitsiHttpsSgIngress",
            cidr_ip=cidr_block_param.value_as_string,
            from_port=443,
            group_id=jitsi_sg.ref,
            ip_protocol="tcp",
            to_port=443)
        jitsi_fallback_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress(
            self,
            "JitsiFallbackNetworkAudioVideoSgIngress",
            cidr_ip=cidr_block_param.value_as_string,
            from_port=4443,
            group_id=jitsi_sg.ref,
            ip_protocol="tcp",
            to_port=4443)
        jitsi_general_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress(
            self,
            "JitsiGeneralNetworkAudioVideoSgIngress",
            cidr_ip=cidr_block_param.value_as_string,
            from_port=10000,
            group_id=jitsi_sg.ref,
            ip_protocol="udp",
            to_port=10000)

        # route 53
        record_set = aws_route53.CfnRecordSet(
            self,
            "RecordSet",
            hosted_zone_name=
            f"{route_53_hosted_zone_name_param.value_as_string}.",
            name=jitsi_hostname_param.value_as_string,
            resource_records=[eip.ref],
            type="A")
        # Set the TTL via a raw property override to work around
        # https://github.com/aws/aws-cdk/issues/8431
        record_set.add_property_override("TTL", 60)

        # AWS::CloudFormation::Interface
        self.template_options.metadata = {
            "OE::Patterns::TemplateVersion": template_version,
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [{
                    "Label": {
                        "default": "Infrastructure Config"
                    },
                    "Parameters": [
                        jitsi_hostname_param.logical_id,
                        route_53_hosted_zone_name_param.logical_id,
                        cidr_block_param.logical_id,
                        ec2_instance_type_param.logical_id,
                        notification_email_param.logical_id
                    ]
                }, {
                    "Label": {
                        "default": "Jitsi Config"
                    },
                    "Parameters": [
                        jitsi_interface_app_name_param.logical_id,
                        jitsi_interface_default_remote_display_name_param.
                        logical_id,
                        jitsi_interface_native_app_name_param.logical_id,
                        jitsi_interface_show_brand_watermark_param.logical_id,
                        jitsi_interface_show_watermark_for_guests_param.
                        logical_id,
                        jitsi_interface_brand_watermark_param.logical_id,
                        jitsi_interface_brand_watermark_link_param.logical_id,
                        jitsi_interface_watermark_param.logical_id,
                        jitsi_interface_watermark_link_param.logical_id,
                    ]
                }, *vpc.metadata_parameter_group()],
                "ParameterLabels": {
                    cidr_block_param.logical_id: {
                        "default": "Ingress CIDR Block"
                    },
                    ec2_instance_type_param.logical_id: {
                        "default": "EC2 instance type"
                    },
                    jitsi_hostname_param.logical_id: {
                        "default": "Jitsi Hostname"
                    },
                    jitsi_interface_app_name_param.logical_id: {
                        "default": "Jitsi Interface App Name"
                    },
                    jitsi_interface_default_remote_display_name_param.logical_id:
                    {
                        "default":
                        "Jitsi Interface Default Remote Display Name"
                    },
                    jitsi_interface_native_app_name_param.logical_id: {
                        "default": "Jitsi Interface Native App Name"
                    },
                    jitsi_interface_show_brand_watermark_param.logical_id: {
                        "default": "Jitsi Interface Show Watermark"
                    },
                    jitsi_interface_show_watermark_for_guests_param.logical_id:
                    {
                        "default": "Jitsi Interface Show Watermark For Guests"
                    },
                    jitsi_interface_brand_watermark_param.logical_id: {
                        "default": "Jitsi Interface Brand Watermark"
                    },
                    jitsi_interface_brand_watermark_link_param.logical_id: {
                        "default": "Jitsi Interface Brand Watermark Link"
                    },
                    jitsi_interface_watermark_param.logical_id: {
                        "default": "Jitsi Interface Watermark"
                    },
                    jitsi_interface_watermark_link_param.logical_id: {
                        "default": "Jitsi Interface Watermark Link"
                    },
                    notification_email_param.logical_id: {
                        "default": "Notification Email"
                    },
                    route_53_hosted_zone_name_param.logical_id: {
                        "default": "AWS Route 53 Hosted Zone Name"
                    },
                    **vpc.metadata_parameter_labels()
                }
            }
        }

        #
        # OUTPUTS
        #

        eip_output = core.CfnOutput(
            self,
            "EipOutput",
            description=
            "The Elastic IP address dynamically mapped to the autoscaling group instance.",
            value=eip.ref)
        endpoint_output = core.CfnOutput(
            self,
            "JitsiUrl",
            description="The URL for the Jitsi instance.",
            value=core.Fn.join(
                "", ["https://", jitsi_hostname_param.value_as_string]))
Example n. 27
    def __init__(self,
                 scope: cdk.Construct,
                 construct_id: str,
                 stack_log_level: str,
                 custom_bkt_name: str = None,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.data_bkt = _s3.Bucket(
            self,
            "dataBucket",
            versioned=True,
            # auto_delete_objects=True,
            # removal_policy=cdk.RemovalPolicy.DESTROY,
            # bucket_name="new-app-bucket-example",
        )

        ##################################################
        ########         ACCESS POINTS         ###########
        ##################################################

        # Set a custom bucket name if one was provided
        if custom_bkt_name:
            cfn_data_bkt = self.data_bkt.node.default_child
            cfn_data_bkt.add_override("Properties.BucketName", custom_bkt_name)
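        # Hedged alternative sketch: the default child of an s3.Bucket is an
        # s3.CfnBucket, so the same override can also be expressed through
        # the typed L1 escape hatch:
        # cfn_data_bkt.bucket_name = custom_bkt_name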

        # Delegate Privilege management to Access Point in bucket policy
        # https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-policies.html#access-points-delegating-control

        self.data_bkt.add_to_resource_policy(
            _iam.PolicyStatement(actions=["*"],
                                 principals=[_iam.AnyPrincipal()],
                                 resources=[
                                     f"{self.data_bkt.bucket_arn}",
                                     f"{self.data_bkt.arn_for_objects('*')}"
                                 ],
                                 conditions={
                                     "StringEquals": {
                                         "s3:DataAccessPointAccount":
                                         f"{cdk.Aws.ACCOUNT_ID}"
                                     }
                                 }))
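        # Hedged sketch (assumption: no access point is created elsewhere in
        # this stack): an S3 Access Point through which the delegated bucket
        # policy above would take effect. The access point name below is
        # hypothetical.
        data_bkt_access_point = _s3.CfnAccessPoint(
            self,
            "dataBktAccessPoint",
            bucket=self.data_bkt.bucket_name,
            name="sales-events-consumer-ap")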

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )
        output_1 = cdk.CfnOutput(self,
                                 "SalesEventsBucket",
                                 value=self.data_bkt.bucket_name,
                                 description="The datasource bucket name")
        output_2 = cdk.CfnOutput(
            self,
            "dataSourceBucketUrl",
            value=
            f"https://console.aws.amazon.com/s3/buckets/{self.data_bkt.bucket_name}",
            description="The datasource bucket url")
Example n. 28
    def __init__(
        self, scope: cdk.Construct, construct_id: str, domain: SMSDomainStack, **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the Lambda Stack for pre-populating the user home directory
        studio_user_lambda = StudioUserLambda(
            self, "FnPopulateStudioUser", vpc=domain.vpc, domain=domain.domain
        )

        # Generate the CF template for the studio user
        stage = cdk.Stage(self, "IntermediateStage")
        SMSIAMUserStack(
            stage,
            "StudioUserStack",
            synthesizer=cdk.BootstraplessSynthesizer(),
        )
        assembly = stage.synth(force=True)

        # Retrieve the local path of the CF template
        template_full_path = assembly.stacks[0].template_full_path

        # Upload CF template to s3 to create an asset to reference
        s3_asset = s3assets.Asset(
            self,
            "TemplateAsset",
            path=template_full_path,
        )

        # Create the Service Catalog product referencing the CF template
        sc_product = servicecatalog.CfnCloudFormationProduct(
            self,
            "StudioUser",
            owner="SageMakerStudio",
            provisioning_artifact_parameters=[
                servicecatalog.CfnCloudFormationProduct.ProvisioningArtifactPropertiesProperty(
                    info={"LoadTemplateFromURL": s3_asset.s3_url}
                )
            ],
            name="StudioUser",
        )

        # Create the Product Portfolio
        sc_portfolio = servicecatalog.CfnPortfolio(
            self,
            "SageMakerPortfolio",
            display_name="SageMakerPortfolio",
            provider_name="SageMakerTemplate",
        )

        # Associate the Studio User Template to the Portfolio
        servicecatalog.CfnPortfolioProductAssociation(
            self,
            "ProductAssociation",
            portfolio_id=sc_portfolio.ref,
            product_id=sc_product.ref,
        )

        # Create a role and associate it with the portfolio
        sc_role = iam.Role(
            self,
            "StudioAdminRole",
            assumed_by=iam.AnyPrincipal(),
            role_name="SageMakerStudioAdminRole",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AWSServiceCatalogEndUserFullAccess"
                ),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSageMakerFullAccess"
                ),
            ],
        )
        sc_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "sagemaker:CreateUserProfile",
                ],
                resources=["*"],
            )
        )
        sc_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "lambda:InvokeFunction",
                ],
                resources=[studio_user_lambda.provider.service_token],
            )
        )
        sc_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=["*"],
            )
        )

        cdk.CfnOutput(
            self,
            "SageMakerStudioAdminRole",
            value=sc_role.role_arn,
            description="SageMakerStudioAdminRole",
            # export_name="SageMakerStudioAdminRole",
        )

        servicecatalog.CfnPortfolioPrincipalAssociation(
            self,
            "PortfolioPrincipalAssociacion",
            portfolio_id=sc_portfolio.ref,
            principal_arn=sc_role.role_arn,
            principal_type="IAM",
        )
Example n. 29
    def __init__(self, scope: core.Construct, id: str, vpc: _ec2.IVpc,
                 url: str, LOAD_PARAMS: dict, **kwargs) -> None:
        """
        Defines an instance of the traffic generator.
        :param scope: construct scope
        :param id:    construct id
        :param vpc:   the VPC in which to host the traffic generator cluster
        :param url:   the URL to hit
        :param LOAD_PARAMS:   LOCUST load testing parameters
        """
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        locust_cluster = _ecs.Cluster(self, "locustCluster", vpc=vpc)
        """
        Load Testing Service in Fargate Cluster
        """

        locust_task_def = _ecs.FargateTaskDefinition(
            self,
            "locustAppTaskDef",
        )

        locust_container = locust_task_def.add_container(
            "locustAppContainer",
            environment={
                "github_profile": "https://github.com/miztiik",
                "LOCUSTFILE_PATH": "/locustfile.py",
                "TARGET_URL": url,
                "LOCUST_OPTS":
                f"--clients={LOAD_PARAMS['NO_OF_CLIENTS']} --hatch-rate={LOAD_PARAMS['HATCH_RATE']} --run-time={LOAD_PARAMS['RUN_TIME']} --no-web --print-stats",
                # --clients: the number of concurrent Locust users.
                # --hatch-rate: the rate per second at which clients are spawned.
                # --run-time: the number of seconds to run Locust (allow enough time to hatch all users).
                "ADDTIONAL_CUSTOM_OPTIONS": "--reset-stats --print-stats"
            },
            image=_ecs.ContainerImage.from_registry(
                "mystique/xray-lambda-profiler:latest"),
            logging=_ecs.LogDrivers.aws_logs(stream_prefix="Mystique"))

        locust_container.add_port_mappings(
            _ecs.PortMapping(container_port=80, protocol=_ecs.Protocol.TCP))
        locust_container.add_port_mappings(
            _ecs.PortMapping(container_port=443, protocol=_ecs.Protocol.TCP))

        # Defines an AWS Lambda resource
        with open(
                "load_generator_stacks/lambda_src/create_fargate_run_task.py",
                encoding="utf8") as fp:
            create_fargate_run_task_fn_handler_code = fp.read()

        pub_subnet_ids = [subnet.subnet_id for subnet in vpc.public_subnets]

        create_fargate_run_task_fn = _lambda.Function(
            self,
            id='triggerFargateRunTask',
            function_name="create_fargate_run_task_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(create_fargate_run_task_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(200),
            environment={
                "CLUSTER_NAME": locust_cluster.cluster_name,
                "TASK_DEFINITION": locust_task_def.task_definition_arn,
                "SUBNETS": json.dumps(pub_subnet_ids),
                "CONTAINER_NAME": locust_container.container_name,
                "NO_OF_TASKS": LOAD_PARAMS["NO_OF_TASKS"]
            },
            reserved_concurrent_executions=1,
        )
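        # Hedged sketch of the handler side (the real handler lives in
        # lambda_src/create_fargate_run_task.py and is not shown here); the
        # environment variables above would typically be consumed like this:
        #
        #   import json, os, boto3
        #
        #   def lambda_handler(event, context):
        #       ecs = boto3.client("ecs")
        #       ecs.run_task(
        #           cluster=os.environ["CLUSTER_NAME"],
        #           taskDefinition=os.environ["TASK_DEFINITION"],
        #           count=int(os.environ["NO_OF_TASKS"]),
        #           launchType="FARGATE",
        #           networkConfiguration={"awsvpcConfiguration": {
        #               # the stack passed the subnet list JSON-encoded
        #               "subnets": json.loads(os.environ["SUBNETS"]),
        #               "assignPublicIp": "ENABLED",
        #           }},
        #       )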

        roleStmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[locust_task_def.task_definition_arn],
            actions=["ecs:RunTask"])
        roleStmt1.sid = "AllowLambdaToCreateFargateRunTask"
        create_fargate_run_task_fn.add_to_role_policy(roleStmt1)

        # task_role = locust_task_def.execution_role.role_arn

        roleStmt2 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                locust_task_def.execution_role.role_arn,
                locust_task_def.task_role.role_arn
            ],
            actions=["iam:PassRole"])
        roleStmt2.sid = "AllowLambdaToPassRoleToRunTask"
        create_fargate_run_task_fn.add_to_role_policy(roleStmt2)

        # Create a trigger for our 'create_fargate_run_task' lambda Function
        gen_load = trigger_run_task(self,
                                    "triggerLoadGeneratorTask",
                                    config_params={
                                        "RUN_TASK_FN_ARN":
                                        create_fargate_run_task_fn.function_arn
                                    },
                                    message=[{
                                        "RUN_TASK_FN_ARN":
                                        create_fargate_run_task_fn.function_arn
                                    }])
        """
        locust_service = _ecs.FargateService(self, 'locustAppService',
                                             cluster=locust_cluster,
                                             task_definition=locust_task_def,
                                             desired_count=2,
                                             assign_public_ip=True,
                                             service_name=f"{global_args.OWNER}-LocustLoadGenerator"
                                             )
        """
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "LocustClusterName",
            value=f"{locust_cluster.cluster_name}",
            export_name="locustClusterName",
            description=
            "The fargate cluster to generate load on APIs using Locust")
Example n. 30
    def __init__(self, scope: core.Construct, id: str, vpc,
                 ec2_instance_type: str, es_endpoint_param_name: str,
                 es_region_param_name: str, stack_log_level: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Read the bootstrap script
        try:
            with open(
                    "elastic_fluent_bit_kibana/stacks/back_end/bootstrap_scripts/deploy_app.sh",
                    encoding="utf-8",
                    mode="r") as f:
                user_data = f.read()
        except OSError as e:
            print("Unable to read UserData script")
            raise e

        # Get the latest Amazon Linux 2 AMI
        amzn_linux_ami = _ec2.MachineImage.latest_amazon_linux(
            generation=_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
        # ec2 Instance Role
        _instance_role = _iam.Role(
            self,
            "webAppClientRole",
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSSMManagedInstanceCore")
            ])

        # Allow CW Agent to create Logs
        _instance_role.add_to_policy(
            _iam.PolicyStatement(actions=["logs:Create*", "logs:PutLogEvents"],
                                 resources=["arn:aws:logs:*:*:*"]))

        # Allow Access to ElasticSearch Domain
        # https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html#es-ac-types-resource
        _instance_role.add_to_policy(
            _iam.PolicyStatement(actions=[
                "es:Describe*",
                "es:List*",
                "es:ESHttpPost",
                "es:ESHttpPut",
            ],
                                 resources=["*"]))

        # fluent_bit_server Instance
        self.fluent_bit_server = _ec2.Instance(
            self,
            "fluentBitLogRouter",
            instance_type=_ec2.InstanceType(
                instance_type_identifier=f"{ec2_instance_type}"),
            instance_name="fluent_bit_log_router_01",
            machine_image=amzn_linux_ami,
            vpc=vpc,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PUBLIC),
            role=_instance_role,
            user_data=_ec2.UserData.custom(user_data))

        # Allow Web Traffic to WebServer
        self.fluent_bit_server.connections.allow_from_any_ipv4(
            _ec2.Port.tcp(80), description="Allow Incoming HTTP Traffic")

        self.fluent_bit_server.connections.allow_from(
            other=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            port_range=_ec2.Port.tcp(443),
            description="Allow Incoming FluentBit Traffic")

        # Let us prepare our FluentBit configuration script. A pre-written
        # script is read below; as written, it is then replaced by a script
        # assembled from the three parts that follow.
        try:
            with open(
                    "elastic_fluent_bit_kibana/stacks/back_end/bootstrap_scripts/configure_fluent_bit.sh",
                    encoding="utf-8",
                    mode="r") as f:
                bash_commands_to_run = f.read()
        except OSError as e:
            print("Unable to read bash commands file")
            raise e

        es_endpoint = _ssm.StringParameter.value_for_string_parameter(
            self, es_endpoint_param_name)
        es_region = _ssm.StringParameter.value_for_string_parameter(
            self, es_region_param_name)

        bash_commands_to_run_01 = """
#!/bin/bash
set -ex
set -o pipefail

# version: 22Nov2020

##################################################
#############     SET GLOBALS     ################
##################################################

REPO_NAME="elastic-fluent-bit-kibana"

GIT_REPO_URL="https://github.com/miztiik/$REPO_NAME.git"

APP_DIR="/var/$REPO_NAME"

LOG_FILE="/var/log/miztiik-automation-configure-fluent-bit.log"

function install_fluent_bit(){
# https://docs.fluentbit.io/manual/installation/linux/amazon-linux
cat > '/etc/yum.repos.d/td-agent-bit.repo' << "EOF"
[td-agent-bit]
name = TD Agent Bit
baseurl = https://packages.fluentbit.io/amazonlinux/2/$basearch/
gpgcheck=1
gpgkey=https://packages.fluentbit.io/fluentbit.key
enabled=1
EOF

# Install the agent
sudo yum -y install td-agent-bit
sudo service td-agent-bit start
service td-agent-bit status
}

function create_config_files(){
    mkdir -p ${APP_DIR}
    cd ${APP_DIR}

echo "
[INPUT]
    name            tail
    path            /var/log/httpd/*log
    tag             automate_log_parse
    Path_Key        filename
[FILTER]
    Name    record_modifier
    Match   *
    Record  hostname    ${HOSTNAME}
    Record  project     elastic-fluent-bit-kibana-demo
    Add     user        Mystique
" > ${APP_DIR}/es.conf
"""

        bash_commands_to_run_02 = f"""
echo "
[OUTPUT]
    Name            es
    Match           automate_log*
    Host            {es_endpoint}
    Port            443
    tls             On
    AWS_Auth        On
    AWS_Region      {es_region}
    Index           miztiik_automation
    Type            app_logs
    Include_Tag_Key On
" >> ${{APP_DIR}}/es.conf
}}
        """

        bash_commands_to_run_03 = """

function configure_fluent_bit(){
# Stop the agent
sudo service td-agent-bit stop

# DO NOT DO THIS IN ANY SERIOUS CONFIG FILE
# Null the defaults and start fresh
> /etc/td-agent-bit/td-agent-bit.conf

echo "
[SERVICE]
    Flush 3
@INCLUDE ${APP_DIR}/es.conf
" > /etc/td-agent-bit/td-agent-bit.conf

sudo service td-agent-bit start
sudo service td-agent-bit status
}

install_fluent_bit >> "${LOG_FILE}"
create_config_files >> "${LOG_FILE}"
configure_fluent_bit >> "${LOG_FILE}"
"""

        bash_commands_to_run = bash_commands_to_run_01 + \
            bash_commands_to_run_02 + bash_commands_to_run_03
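        # Hedged note on the three-part split: parts 01 and 03 are plain
        # strings, so shell tokens such as ${APP_DIR} reach the instance
        # untouched, while part 02 is an f-string so the SSM-resolved ES
        # endpoint and region are interpolated at synth time. A single
        # f-string would work too, at the cost of doubling every literal
        # brace, e.g.:
        #
        #   es_conf = f"""
        #   echo "
        #   [OUTPUT]
        #       Name       es
        #       Host       {es_endpoint}
        #       AWS_Region {es_region}
        #   " >> ${{APP_DIR}}/es.conf
        #   """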

        # Configure Fluent Bit using SSM Run Commands
        config_fluentbit_doc = CreateSsmRunCommandDocument(
            self,
            "configureFluentBitToEs",
            run_document_name="configureFluentBitToEs",
            _doc_desc="Bash script to configure FluentBit to send logs to ES",
            bash_commands_to_run=bash_commands_to_run,
            enable_log=False)

        # Create an SSM Association to trigger the SSM document on the target (EC2)
        _run_commands_on_ec2 = _ssm.CfnAssociation(
            self,
            "runCommandsOnEc2",
            name=config_fluentbit_doc.get_ssm_linux_document_name,
            targets=[{
                "key": "InstanceIds",
                "values": [self.fluent_bit_server.instance_id]
            }])
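        # Hedged alternative sketch: the association could also target
        # instances by tag instead of by instance ID, e.g.
        # targets=[{"key": "tag:Name",
        #           "values": ["fluent_bit_log_router_01"]}]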

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )
        output_1 = core.CfnOutput(
            self,
            "FluentBitPrivateIp",
            value=f"http://{self.fluent_bit_server.instance_private_ip}",
            description="Private IP of the Fluent Bit server on EC2")
        output_2 = core.CfnOutput(
            self,
            "FluentBitInstance",
            value=(f"https://console.aws.amazon.com/ec2/v2/home?region="
                   f"{core.Aws.REGION}"
                   f"#Instances:search="
                   f"{self.fluent_bit_server.instance_id}"
                   f";sort=instanceId"),
            description=
            f"Login to the instance using Systems Manager and use curl to access the Instance"
        )
        output_3 = core.CfnOutput(
            self,
            "AwsForFluentBit",
            value=
            ("https://github.com/aws/aws-for-fluent-bit/tree/master/examples/fluent-bit/systems-manager-ec2"
             ),
            description="AWS examples for Fluent Bit with Systems Manager on EC2")

        output_4 = core.CfnOutput(
            self,
            "WebServerUrl",
            value=f"{self.fluent_bit_server.instance_public_dns_name}",
            description="Public DNS name of the web server on EC2")
        output_5 = core.CfnOutput(
            self,
            "GenerateAccessTraffic",
            value=
            f"ab -n 10 -c 1 http://{self.fluent_bit_server.instance_public_dns_name}/",
            description="Command to generate successful HTTP traffic against the web server")
        output_6 = core.CfnOutput(
            self,
            "GenerateFailedTraffic",
            value=
            f"ab -n 10 -c 1 http://{self.fluent_bit_server.instance_public_dns_name}/${{RANDOM}}",
            description="Command to generate failing (404) HTTP traffic against the web server")