Example #1
    def __init__(self, parent, name, **kwargs):
        super().__init__(parent, name, **kwargs)

        vpc = ec2.Vpc(self, 'GreetingVpc', max_azs=2)

        # create an ECS cluster
        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

        # add capacity to it
        cluster.add_capacity('greeter-capacity',
            instance_type=ec2.InstanceType('t2.micro'),
            min_capacity=3,
            max_capacity=3   
        )

        # Name service
        name_task_definition = ecs.Ec2TaskDefinition(self, "name-task-definition")

        name_container = name_task_definition.add_container(
            'name',
            image=ecs.ContainerImage.from_registry('nathanpeck/name'),
            memory_limit_mib=128
        )

        name_container.add_port_mappings(ecs.PortMapping(
            container_port=3000
        ))

        name_service = ecs.Ec2Service(self, "name-service",
            cluster=cluster,
            desired_count=2,
            task_definition=name_task_definition
        )

        # Greeting service
        greeting_task_definition = ecs.Ec2TaskDefinition(self, "greeting-task-definition")

        greeting_container = greeting_task_definition.add_container(
            'greeting',
            image=ecs.ContainerImage.from_registry('nathanpeck/greeting'),
            memory_limit_mib=128
        )

        greeting_container.add_port_mappings(ecs.PortMapping(
            container_port=3000
        ))

        greeting_service = ecs.Ec2Service(self, "greeting-service",
            cluster=cluster,
            desired_count=1,
            task_definition=greeting_task_definition
        )

        # Internal load balancer for the backend services
        internal_lb = elbv2.ApplicationLoadBalancer(self, "internal",
            vpc=vpc,
            internet_facing=False
        )

        internal_listener = internal_lb.add_listener('PublicListener',
            port=80,
            open=True
        )

        internal_listener.add_target_groups('default',
            target_groups=[elbv2.ApplicationTargetGroup(
                self, 'default',
                vpc=vpc,
                protocol=elbv2.ApplicationProtocol.HTTP,
                port=80
            )]
        )

        internal_listener.add_targets('name',
            port=80,
            path_pattern='/name*',
            priority=1,
            targets=[name_service]
        )

        internal_listener.add_targets('greeting',
            port=80,
            path_pattern='/greeting*',
            priority=2,
            targets=[greeting_service]
        )

        # Greeter service
        greeter_task_definition = ecs.Ec2TaskDefinition(self, "greeter-task-definition")

        greeter_container = greeter_task_definition.add_container(
            'greeter',
            image=ecs.ContainerImage.from_registry('nathanpeck/greeter'),
            memory_limit_mib=128,
            environment={
                "GREETING_URL": 'http://' + internal_lb.load_balancer_dns_name + '/greeting',
                "NAME_URL": 'http://' + internal_lb.load_balancer_dns_name + '/name'
            }
        )

        greeter_container.add_port_mappings(ecs.PortMapping(
            container_port=3000
        ))

        greeter_service = ecs.Ec2Service(self, "greeter-service",
            cluster=cluster,
            desired_count=2,
            task_definition=greeter_task_definition
        )

        # Internet-facing load balancer for the frontend services
        external_lb = elbv2.ApplicationLoadBalancer(self, 'external',
            vpc=vpc,
            internet_facing=True
        )

        external_listener = external_lb.add_listener('PublicListener',
            port=80,
            open=True
        )

        external_listener.add_targets('greeter',
            port=80,
            targets=[greeter_service]
        )

        # output dns addresses
        self.internal_dns = core.CfnOutput(self, 'InternalDNS',
            export_name='greeter-app-internal',
            value=internal_lb.load_balancer_dns_name
        )
        self.external_dns = core.CfnOutput(self, 'ExternalDNS',
            export_name='ExternalDNS',
            value=external_lb.load_balancer_dns_name
        )
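
This snippet is only the stack's __init__ body; the class declaration and the app entrypoint sit outside the excerpt. A minimal sketch of how such a stack would be synthesized, assuming the method above belongs to a core.Stack subclass (the class name GreeterStack is hypothetical):

from aws_cdk import core

app = core.App()
GreeterStack(app, 'greeter-app')  # hypothetical class wrapping the __init__ above
app.synth()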
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Create a VPC
        myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

        # SG for ELB creation
        websitefrontendSG = ec2.SecurityGroup(
            self,
            'websitefrontendSG',
            vpc=myvpc,
            security_group_name='websitefrontendSG')
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(80))
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(443))

        # Create ALB in VPC
        alb = elb.ApplicationLoadBalancer(
            self,
            'websitefrontend-public',
            vpc=myvpc,
            load_balancer_name='websitefrontend-public',
            security_group=websitefrontendSG,
            internet_facing=True)

        # Add target group to ALB
        catalogtargetgroup = elb.ApplicationTargetGroup(
            self,
            'CatalogTargetGroup',
            port=80,
            vpc=myvpc,
            target_type=elb.TargetType.IP)

        if not vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(
                self,
                'alblistenerhttp',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=80)

        if vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(self,
                                                      'alblistenerhttp',
                                                      load_balancer=alb,
                                                      port=80)
            elb.ApplicationListenerRule(self,
                                        'httpredirectionrule',
                                        listener=alblistenerhttp,
                                        redirect_response=elb.RedirectResponse(
                                            status_code='HTTP_301',
                                            port='443',
                                            protocol='HTTPS'))
            # OPTIONAL - Add https listener to ALB & attach certificate
            alblistenerhttps = elb.ApplicationListener(
                self,
                'alblistenerhttps',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=443,
                certificate_arns=[vars.sslcert_arn])

            # OPTIONAL - Redirect HTTP to HTTPS
            alblistenerhttp.add_redirect_response(id='redirectionrule',
                                                  port='443',
                                                  status_code='HTTP_301',
                                                  protocol='HTTPS')

        if vars.customdomain:
            # OPTIONAL - Update DNS with ALB
            webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
                self,
                id='customdomain',
                hosted_zone_id=vars.hosted_zone_id,
                zone_name=vars.zone_name)
            webshop_root_record = r53.ARecord(
                self,
                'ALBAliasRecord',
                zone=webshopxyz_zone,
                target=r53.RecordTarget.from_alias(
                    alias.LoadBalancerTarget(alb)))

        # SG for ECS creation
        ECSSG = ec2.SecurityGroup(self,
                                  'ECSSecurityGroup',
                                  vpc=myvpc,
                                  security_group_name='ECS')
        ECSSG.add_ingress_rule(peer=websitefrontendSG,
                               connection=ec2.Port.tcp(80))

        # SG for MySQL creation
        MySQLSG = ec2.SecurityGroup(self,
                                    'DBSecurityGroup',
                                    vpc=myvpc,
                                    security_group_name='DB')
        MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

        # Create DB subnet group
        subnetlist = []
        for subnet in myvpc.private_subnets:
            subnetlist.append(subnet.subnet_id)
        subnetgr = rds.CfnDBSubnetGroup(
            self,
            'democlustersubnetgroup',
            db_subnet_group_name='democlustersubnetgroup',
            db_subnet_group_description='DemoCluster',
            subnet_ids=subnetlist)

        # Create secret db passwd
        secret = sm.SecretStringGenerator(
            exclude_characters="\"'@/\\",
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=40)
        dbpass = sm.Secret(self,
                           'democlusterpass',
                           secret_name='democlusterpass',
                           generate_secret_string=secret)

        # Create Aurora serverless MySQL instance
        dbcluster = rds.CfnDBCluster(
            self,
            'DemoCluster',
            engine='aurora',
            engine_mode='serverless',
            engine_version='5.6',
            db_cluster_identifier='DemoCluster',
            master_username=dbpass.secret_value_from_json(
                'username').to_string(),
            master_user_password=dbpass.secret_value_from_json(
                'password').to_string(),
            storage_encrypted=True,
            port=3306,
            vpc_security_group_ids=[MySQLSG.security_group_id],
            scaling_configuration=rds.CfnDBCluster.
            ScalingConfigurationProperty(auto_pause=True,
                                         max_capacity=4,
                                         min_capacity=1,
                                         seconds_until_auto_pause=300),
            db_subnet_group_name=subnetgr.db_subnet_group_name)
        dbcluster.add_override('DependsOn', 'democlustersubnetgroup')

        # Attach database to secret
        attach = sm.CfnSecretTargetAttachment(
            self,
            'RDSAttachment',
            secret_id=dbpass.secret_arn,
            target_id=dbcluster.ref,
            target_type='AWS::RDS::DBCluster')

        # Upload image into ECR repo
        ecrdemoimage = ecra.DockerImageAsset(self,
                                             'ecrdemoimage',
                                             directory='../',
                                             repository_name='demorepo',
                                             exclude=['cdk.out'])

        # Create ECS fargate cluster
        ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

        # Create task role for productsCatalogTask
        getsecretpolicystatement = iam.PolicyStatement(
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds"
            ],
            resources=[dbpass.secret_arn],
            effect=iam.Effect.ALLOW)
        getsecretpolicydocument = iam.PolicyDocument(
            statements=[getsecretpolicystatement])
        taskrole = iam.Role(
            self,
            'TaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name='TaskRoleforproductsCatalogTask',
            inline_policies={'getsecretpolicy': getsecretpolicydocument})

        # Create task definition
        taskdefinition = ecs.FargateTaskDefinition(self,
                                                   'productsCatalogTask',
                                                   cpu=1024,
                                                   memory_limit_mib=2048,
                                                   task_role=taskrole)

        # Add container to task definition
        productscatalogcontainer = taskdefinition.add_container(
            'productscatalogcontainer',
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=ecrdemoimage),
            environment={
                "region": vars.region,
                "secretname": "democlusterpass"
            })
        productscatalogcontainer.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80))

        # Create service and associate it with the cluster
        catalogservice = ecs.FargateService(
            self,
            'catalogservice',
            task_definition=taskdefinition,
            assign_public_ip=False,
            security_group=ECSSG,
            vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets),
            cluster=ecscluster,
            desired_count=2)

        # Add autoscaling to the service
        scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                       min_capacity=1)
        scaling.scale_on_cpu_utilization(
            'ScaleOnCPU',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(amount=1),
            scale_out_cooldown=core.Duration.seconds(amount=0))

        # Associate the fargate service with load balancer targetgroup
        catalogservice.attach_to_application_target_group(catalogtargetgroup)
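
The container above receives only the region and the secret's name as environment variables, so the application is expected to fetch the actual credentials at runtime. A hedged sketch of that lookup with boto3 (the code running inside the container is not part of this example):

import json
import os

import boto3  # assumed to be available in the container image

client = boto3.client('secretsmanager', region_name=os.environ['region'])
response = client.get_secret_value(SecretId=os.environ['secretname'])
credentials = json.loads(response['SecretString'])  # {'username': ..., 'password': ...}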
Example #3
    "west-onetest-task",
    #network_mode=ecs.NetworkMode.AWS_VPC,
)

voncweb_container = task_definition.add_container(
    "west-onetest-voncwebserver",
    image=ecs.ContainerImage.from_registry(ecr_repo_voncweb),
    cpu=100,
    memory_limit_mib=256,
    essential=True,
    environment={
        'USE': 'me',
        'LAGI': 'ddd'
    })
port_mapping = ecs.PortMapping(container_port=3000,
                               host_port=443,
                               protocol=ecs.Protocol.TCP)
voncweb_container.add_port_mappings(port_mapping)

dispatcher_container = task_definition.add_container(
    "west-onetest-dispatcher",
    image=ecs.ContainerImage.from_registry(ecr_repo_dispatcher),
    cpu=100,
    memory_limit_mib=256,
    essential=True,
    environment={
        'USE': 'me',
        'LAGI': 'ddd'
    })
port_mapping = ecs.PortMapping(container_port=5000,
                               host_port=5000,
                               protocol=ecs.Protocol.TCP)  # completion assumed; the excerpt is truncated here
dispatcher_container.add_port_mappings(port_mapping)
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = ec2.Vpc(
            self, "responder-Vpc",
            max_azs=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    name="public",
                    subnet_type=ec2.SubnetType.PUBLIC,
                )
            ],
            nat_gateways=0,
        )

        # ec2.Vpc.from_lookup(
        #     self, 'responder-Vpc', vpc_id="vpc-e7a36481")

        role = iam.Role.from_role_arn(
            self, "Role", os.getenv("ARN_URI"))

        # <3>
        cluster = ecs.Cluster(
            self, "responder-Cluster",
            vpc=vpc,
        )

        cluster.add_capacity("AutoScalingCapacity",
                             instance_type=ec2.InstanceType("t2.micro"),
                             desired_capacity=3
                             )

        # <4>
        taskdef = ecs.FargateTaskDefinition(
            self, "responder-TaskDef",
            cpu=1024,  # 1 CPU
            memory_limit_mib=4096,  # 4GB RAM
        )

        ecr_repository = ecr.Repository(
            self, 'responder-Repository',
            repository_name='responder'
        )

        container = taskdef.add_container(
            "responder-Container",
            image=ecs.ContainerImage.from_ecr_repository(ecr_repository),
            memory_limit_mib=4000,
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="responder",
                log_retention=aws_logs.RetentionDays.ONE_DAY
            ),
        )

        container.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80)
        )

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "responder-FargateService",
            cluster=cluster,
            task_definition=taskdef,
            listener_port=80
        )

        # Store parameters in SSM
        ssm.StringParameter(
            self, "ECS_CLUSTER_NAME",
            parameter_name="ECS_CLUSTER_NAME",
            string_value=cluster.cluster_name,
        )
        ssm.StringParameter(
            self, "ECS_TASK_DEFINITION_ARN",
            parameter_name="ECS_TASK_DEFINITION_ARN",
            string_value=taskdef.task_definition_arn
        )
        ssm.StringParameter(
            self, "ECS_TASK_VPC_SUBNET_1",
            parameter_name="ECS_TASK_VPC_SUBNET_1",
            string_value=vpc.public_subnets[0].subnet_id
        )
        ssm.StringParameter(
            self, "CONTAINER_NAME",
            parameter_name="CONTAINER_NAME",
            string_value=container.container_name
        )
        ssm.StringParameter(
            self, "ECS_CLUSTER_ROLE",
            parameter_name="ECS_CLUSTER_ROLE",
            string_value=role.role_name,
        )

        core.CfnOutput(self,
                       "ClusterName",
                       value=cluster.cluster_name)
        core.CfnOutput(self,
                       "TaskDefinitionArn",
                       value=taskdef.task_definition_arn)
        core.CfnOutput(
            self,
            "LoadBalancerDNS",
            value=fargate_service.load_balancer.load_balancer_dns_name,
        )
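
The stack persists the cluster name, task definition ARN, subnet and container name to SSM instead of wiring up a long-running consumer, which suggests tasks are launched out-of-band. One plausible consumer, sketched with boto3 (this script is an assumption, not part of the stack):

import boto3

ssm_client = boto3.client('ssm')
ecs_client = boto3.client('ecs')

def param(name):
    return ssm_client.get_parameter(Name=name)['Parameter']['Value']

# Launch a one-off Fargate task using the stored parameters.
ecs_client.run_task(
    cluster=param('ECS_CLUSTER_NAME'),
    taskDefinition=param('ECS_TASK_DEFINITION_ARN'),
    launchType='FARGATE',
    networkConfiguration={
        'awsvpcConfiguration': {
            'subnets': [param('ECS_TASK_VPC_SUBNET_1')],
            'assignPublicIp': 'ENABLED',  # the stored subnet is public
        }
    },
)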
Example #5
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        vpc = _ec2.Vpc(self,
                       "ecs-vpc",
                       cidr="10.0.0.0/16",
                       nat_gateways=1,
                       max_azs=3)

        clusterAdmin = _iam.Role(self,
                                 "AdminRole",
                                 assumed_by=_iam.AccountRootPrincipal())

        cluster = _ecs.Cluster(self, "ecs-cluster", vpc=vpc)

        logging = _ecs.AwsLogDriver(stream_prefix="ecs-logs")

        taskRole = _iam.Role(
            self,
            f"ecs-taskRole-{cdk.Stack.stack_name}",
            role_name=f"ecs-taskRole-{cdk.Stack.stack_name}",
            assumed_by=_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        # ECS Constructs

        executionRolePolicy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=['*'],
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage",
                "logs:CreateLogStream", "logs:PutLogEvents"
            ])

        taskDef = _ecs.FargateTaskDefinition(self,
                                             "ecs-taskdef",
                                             task_role=taskRole)

        taskDef.add_to_execution_role_policy(executionRolePolicy)

        container = taskDef.add_container(
            'flask-app',
            image=_ecs.ContainerImage.from_registry(
                "nikunjv/flask-image:blue"),
            memory_limit_mib=256,
            cpu=256,
            logging=logging)

        container.add_port_mappings(
            _ecs.PortMapping(container_port=5000, protocol=_ecs.Protocol.TCP))

        fargateService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "ecs-service",
            cluster=cluster,
            task_definition=taskDef,
            public_load_balancer=True,
            desired_count=3,
            listener_port=80)

        scaling = fargateService.service.auto_scale_task_count(max_capacity=6)

        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=10,
            scale_in_cooldown=cdk.Duration.seconds(300),
            scale_out_cooldown=cdk.Duration.seconds(300))

        # PIPELINE CONSTRUCTS

        # ECR Repo

        ecrRepo = ecr.Repository(self, "EcrRepo")

        gitHubSource = codebuild.Source.git_hub(
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(
                    codebuild.EventAction.PUSH).and_branch_is('main'),
            ])

        # CODEBUILD - project

        project = codebuild.Project(
            self,
            "ECSProject",
            project_name=cdk.Aws.STACK_NAME,
            source=gitHubSource,
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_2,
                privileged=True),
            environment_variables={
                "CLUSTER_NAME": {
                    'value': cluster.cluster_name
                },
                "ECR_REPO_URI": {
                    'value': ecrRepo.repository_uri
                }
            },
            build_spec=codebuild.BuildSpec.from_object({
                'version': "0.2",
                'phases': {
                    'pre_build': {
                        'commands': [
                            'env',
                            'export TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION}'
                        ]
                    },
                    'build': {
                        'commands': [
                            'cd docker-app',
                            'docker build -t $ECR_REPO_URI:$TAG .',
                            '$(aws ecr get-login --no-include-email)',
                            'docker push $ECR_REPO_URI:$TAG'
                        ]
                    },
                    'post_build': {
                        'commands': [
                            'echo "In Post-Build Stage"', 'cd ..',
                            "printf '[{\"name\":\"flask-app\",\"imageUri\":\"%s\"}]' $ECR_REPO_URI:$TAG > imagedefinitions.json",
                            "pwd; ls -al; cat imagedefinitions.json"
                        ]
                    }
                },
                'artifacts': {
                    'files': ['imagedefinitions.json']
                }
            }))

        # PIPELINE ACTIONS

        sourceOutput = codepipeline.Artifact()
        buildOutput = codepipeline.Artifact()

        sourceAction = codepipeline_actions.GitHubSourceAction(
            action_name='GitHub_Source',
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            branch='master',  # NB: the CodeBuild webhook above filters on 'main'
            oauth_token=cdk.SecretValue.secrets_manager("/my/github/token"),
            output=sourceOutput)

        buildAction = codepipeline_actions.CodeBuildAction(
            action_name='codeBuild',
            project=project,
            input=sourceOutput,
            outputs=[buildOutput])

        manualApprovalAction = codepipeline_actions.ManualApprovalAction(
            action_name='Approve')

        deployAction = codepipeline_actions.EcsDeployAction(
            action_name='DeployAction',
            service=fargateService.service,
            image_file=codepipeline.ArtifactPath(buildOutput,
                                                 'imagedefinitions.json'))

        pipeline = codepipeline.Pipeline(self, "ECSPipeline")

        source_stage = pipeline.add_stage(stage_name="Source",
                                          actions=[sourceAction])

        build_stage = pipeline.add_stage(stage_name="Build",
                                         actions=[buildAction])

        approve_stage = pipeline.add_stage(stage_name="Approve",
                                           actions=[manualApprovalAction])

        deploy_stage = pipeline.add_stage(stage_name="Deploy-to-ECS",
                                          actions=[deployAction])

        ecrRepo.grant_pull_push(project.role)

        project.add_to_role_policy(
            _iam.PolicyStatement(resources=[cluster.cluster_arn],
                                 actions=[
                                     "ecs:DescribeCluster",
                                     "ecr:GetAuthorizationToken",
                                     "ecr:BatchCheckLayerAvailability",
                                     "ecr:BatchGetImage",
                                     "ecr:GetDownloadUrlForLayer"
                                 ]))

        # OUTPUT

        cdk.CfnOutput(
            self,
            "LoadBlancer-DNS",
            value=fargateService.load_balancer.load_balancer_dns_name)
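
The EcsDeployAction above locates the newly built image through the imagedefinitions.json artifact emitted in the buildspec's post_build phase. For illustration, the printf there is equivalent to this hypothetical Python (URI and tag are placeholders for $ECR_REPO_URI and $CODEBUILD_RESOLVED_SOURCE_VERSION):

import json

ecr_repo_uri = "123456789012.dkr.ecr.us-east-1.amazonaws.com/demo"  # placeholder
tag = "abc1234"  # placeholder

with open("imagedefinitions.json", "w") as f:
    json.dump([{"name": "flask-app", "imageUri": f"{ecr_repo_uri}:{tag}"}], f)

The "name" field must match the container name in the task definition ('flask-app' here), which is how the deploy action knows which container to repoint.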
Example #6
    def __init__(self, scope: core.Construct, construct_id: str,
                 properties: WordpressStackProperties, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        database = rds.ServerlessCluster(
            self,
            "WordpressServerless",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            default_database_name="WordpressDatabase",
            vpc=properties.vpc,
            scaling=rds.ServerlessScalingOptions(
                auto_pause=core.Duration.seconds(0)),
            deletion_protection=False,
            backup_retention=core.Duration.days(7),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        file_system = efs.FileSystem(
            self,
            "WebRoot",
            vpc=properties.vpc,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
        )

        # docker context directory
        docker_context_path = os.path.dirname(__file__) + "/../../src"

        # upload images to ecr
        nginx_image = ecr_assets.DockerImageAsset(
            self,
            "Nginx",
            directory=docker_context_path,
            file="Docker.nginx",
        )

        wordpress_image = ecr_assets.DockerImageAsset(
            self,
            "Php",
            directory=docker_context_path,
            file="Docker.wordpress",
        )

        cluster = ecs.Cluster(self,
                              'ComputeResourceProvider',
                              vpc=properties.vpc)

        wordpress_volume = ecs.Volume(
            name="WebRoot",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id))

        event_task = ecs.FargateTaskDefinition(self,
                                               "WordpressTask",
                                               volumes=[wordpress_volume])

        #
        # webserver
        #
        nginx_container = event_task.add_container(
            "Nginx",
            image=ecs.ContainerImage.from_docker_image_asset(nginx_image))

        nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

        nginx_container_volume_mount_point = ecs.MountPoint(
            read_only=True,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        nginx_container.add_mount_points(nginx_container_volume_mount_point)

        #
        # application server
        #
        app_container = event_task.add_container(
            "Php",
            environment={
                'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
                'WORDPRESS_TABLE_PREFIX': 'wp_'
            },
            secrets={
                'WORDPRESS_DB_USER':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="username"),
                'WORDPRESS_DB_PASSWORD':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="password"),
                'WORDPRESS_DB_NAME':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="dbname"),
            },
            image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
        app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

        container_volume_mount_point = ecs.MountPoint(
            read_only=False,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        app_container.add_mount_points(container_volume_mount_point)

        #
        # create service
        #
        wordpress_service = ecs.FargateService(
            self,
            "InternalService",
            task_definition=event_task,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            cluster=cluster,
        )

        #
        # scaling
        #
        scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                          max_capacity=50)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=85,
            scale_in_cooldown=core.Duration.seconds(120),
            scale_out_cooldown=core.Duration.seconds(30),
        )

        #
        # network acl
        #
        database.connections.allow_default_port_from(wordpress_service,
                                                     "wordpress access to db")
        file_system.connections.allow_default_port_from(wordpress_service)

        #
        # external access
        #
        wordpress_service.connections.allow_from(
            other=properties.load_balancer, port_range=ec2.Port.tcp(80))

        http_listener = properties.load_balancer.add_listener(
            "HttpListener",
            port=80,
        )

        http_listener.add_targets(
            "HttpServiceTarget",
            protocol=elbv2.ApplicationProtocol.HTTP,
            targets=[wordpress_service],
            health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
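
This stack consumes a WordpressStackProperties object for its VPC and the shared load balancer; the class itself is outside the excerpt. A minimal sketch of what it presumably carries, inferred from the attribute accesses above:

class WordpressStackProperties:
    """Hypothetical reconstruction; only vpc and load_balancer are used above."""

    def __init__(self, vpc: ec2.Vpc,
                 load_balancer: elbv2.ApplicationLoadBalancer) -> None:
        self.vpc = vpc
        self.load_balancer = load_balancer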
Example #7
    def __init__(self, scope: cdk.Construct, id: str, vpc: ec2.Vpc,
                 s3_bucket_name: str, certificate: acm.Certificate,
                 consoleme_alb: lb.ApplicationLoadBalancer,
                 consoleme_sg: ec2.SecurityGroup, task_role_arn: str,
                 task_execution_role_arn: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ECS Task definition and volumes

        if USE_PUBLIC_DOCKER_IMAGE:
            docker_image = ecs.ContainerImage.from_registry(DOCKER_IMAGE)
        else:
            docker_image = ecs.ContainerImage.from_docker_image_asset(
                ecr_assets.DockerImageAsset(self,
                                            "ConsoleMeCustomImage",
                                            directory="../"))

        imported_task_role = iam.Role.from_role_arn(self,
                                                    "ImportedTaskRole",
                                                    role_arn=task_role_arn)

        imported_task_execution_role = iam.Role.from_role_arn(
            self,
            "ImportedTaskExecutionRole",
            role_arn=task_execution_role_arn)

        consoleme_ecs_task_definition = ecs.FargateTaskDefinition(
            self,
            "ConsoleMeTaskDefinition",
            cpu=2048,
            memory_limit_mib=4096,
            execution_role=imported_task_execution_role,
            task_role=imported_task_role,
        )

        # ECS Container definition, service, target group and ALB attachment

        consoleme_ecs_task_definition.add_container(
            "Container",
            image=docker_image,
            privileged=False,
            port_mappings=[
                ecs.PortMapping(container_port=8081,
                                host_port=8081,
                                protocol=ecs.Protocol.TCP)
            ],
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="ContainerLogs-",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
            environment={
                "SETUPTOOLS_USE_DISTUTILS": "stdlib",
                "CONSOLEME_CONFIG_S3":
                "s3://" + s3_bucket_name + "/config.yaml",
                "EC2_REGION": self.region,
            },
            working_directory="/apps/consoleme",
            command=[
                "bash",
                "-c",
                "python scripts/retrieve_or_decode_configuration.py; python consoleme/__main__.py",
            ],
        )

        consoleme_ecs_task_definition.add_container(
            "CeleryContainer",
            image=docker_image,
            privileged=False,
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="CeleryContainerLogs-",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
            environment={
                "SETUPTOOLS_USE_DISTUTILS": "stdlib",
                "CONSOLEME_CONFIG_S3":
                "s3://" + s3_bucket_name + "/config.yaml",
                "COLUMNS": "80",
                "EC2_REGION": self.region,
            },
            command=[
                "bash",
                "-c",
                "python scripts/retrieve_or_decode_configuration.py; python scripts/initialize_redis_oss.py; celery -A consoleme.celery_tasks.celery_tasks worker -l DEBUG -B -E --concurrency=8",
            ],
        )

        # ECS cluster

        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

        consoleme_imported_alb = (
            lb.ApplicationLoadBalancer.
            from_application_load_balancer_attributes(
                self,
                "ConsoleMeImportedALB",
                load_balancer_arn=consoleme_alb.load_balancer_arn,
                vpc=vpc,
                security_group_id=consoleme_sg.security_group_id,
                load_balancer_dns_name=consoleme_alb.load_balancer_dns_name,
            ))

        consoleme_ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "Service",
            cluster=cluster,
            task_definition=consoleme_ecs_task_definition,
            load_balancer=consoleme_imported_alb,
            security_groups=[consoleme_sg],
            open_listener=False,
        )

        consoleme_ecs_service.target_group.configure_health_check(
            path="/", enabled=True, healthy_http_codes="200-302")

        consoleme_ecs_service_scaling_target = applicationautoscaling.ScalableTarget(
            self,
            "AutoScalingGroup",
            max_capacity=MAX_CAPACITY,
            min_capacity=MIN_CAPACITY,
            resource_id="service/" + cluster.cluster_name + "/" +
            consoleme_ecs_service.service.service_name,
            scalable_dimension="ecs:service:DesiredCount",
            service_namespace=applicationautoscaling.ServiceNamespace.ECS,
            role=iam.Role(
                self,
                "AutoScaleRole",
                assumed_by=iam.ServicePrincipal(
                    service="ecs-tasks.amazonaws.com"),
                description="Role for ECS auto scaling group",
                managed_policies=[
                    iam.ManagedPolicy.from_managed_policy_arn(
                        self,
                        "AutoScalingManagedPolicy",
                        managed_policy_arn=
                        "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceAutoscaleRole",
                    )
                ],
            ),
        )

        applicationautoscaling.TargetTrackingScalingPolicy(
            self,
            "AutoScalingPolicy",
            scaling_target=consoleme_ecs_service_scaling_target,
            scale_in_cooldown=cdk.Duration.seconds(amount=10),
            scale_out_cooldown=cdk.Duration.seconds(amount=10),
            target_value=50,
            predefined_metric=applicationautoscaling.PredefinedMetric.
            ECS_SERVICE_AVERAGE_CPU_UTILIZATION,
        )

        consoleme_imported_alb.add_listener(
            "ConsoleMeALBListener",
            protocol=lb.ApplicationProtocol.HTTPS,
            port=443,
            certificates=[certificate],
            default_action=lb.ListenerAction.forward(
                target_groups=[consoleme_ecs_service.target_group]),
        )
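
Several module-level constants (USE_PUBLIC_DOCKER_IMAGE, DOCKER_IMAGE, MIN_CAPACITY, MAX_CAPACITY) are referenced but not shown. Illustrative values, assuming they live at the top of the same module:

USE_PUBLIC_DOCKER_IMAGE = True        # False builds the image from ../ instead
DOCKER_IMAGE = "consoleme/consoleme"  # illustrative registry image name
MIN_CAPACITY = 1                      # autoscaling floor (illustrative)
MAX_CAPACITY = 3                      # autoscaling ceiling (illustrative)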
Example #8
    def __create_nlb_service(self, service_name: str, ctx: object):
        ctx_srv = getattr(ctx.inbound.services.nlb, service_name)

        ecs_task_role = self.__create_default_task_role(service_name)

        log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                            stream_prefix=service_name)

        # create a Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task_definition",
            cpu=ctx_srv.size.cpu,
            memory_limit_mib=ctx_srv.size.ram,
            execution_role=self.ecs_exec_role,
            task_role=ecs_task_role,
        )

        # create a container definition and associate with the Fargate task
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope=self,
            id=f"{service_name}_container_definition",
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                self.ecr_repository, "latest"),
            logging=log_driver,
            **container_vars)
        security_group = ec2.SecurityGroup(scope=self,
                                           id=f"{service_name}_sg",
                                           vpc=self.vpc)
        service = ecs.FargateService(
            scope=self,
            id=f"{service_name}_service",
            task_definition=task_definition,
            cluster=self.cluster,
            desired_count=getattr(ctx_srv, "desired_count",
                                  ctx.default_desired_count),
            service_name=service_name,
            security_group=security_group,
            health_check_grace_period=core.Duration.minutes(10))

        # map ports on the container
        for port in ctx_srv.ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.TCP))
            # add a listener to network load balancer
            listener = self.load_balancer.add_listener(
                id=f"{service_name}_{port}", port=port)

            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.tcp(port),
                f"Logstash ingress for {service_name}")

            target = service.load_balancer_target(
                container_name=container.container_name, container_port=port)

            listener.add_targets(id=f"{service_name}_{port}_tg",
                                 port=port,
                                 targets=[target])

        scaling = service.auto_scale_task_count(
            max_capacity=ctx_srv.scaling.max_capacity,
            min_capacity=ctx_srv.scaling.min_capacity)

        scaling.scale_on_cpu_utilization(
            id="cpu_scaling",
            target_utilization_percent=ctx_srv.scaling.
            target_utilization_percent,
            scale_in_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_out_cooldown_seconds))
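
The ctx_srv object that drives task sizing, ports and scaling comes from configuration outside the excerpt. Its shape can be inferred from the attribute accesses; a hedged stand-in using SimpleNamespace (all values illustrative):

from types import SimpleNamespace

ctx_srv = SimpleNamespace(
    size=SimpleNamespace(cpu=512, ram=1024),
    ports=[5044],
    desired_count=2,
    scaling=SimpleNamespace(min_capacity=1,
                            max_capacity=4,
                            target_utilization_percent=60,
                            scale_in_cooldown_seconds=300,
                            scale_out_cooldown_seconds=60),
)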
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, "EC2Cluster",
            vpc=vpc
        )

        security_group = ec2.SecurityGroup(
            self, "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
        )

        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_tcp(),
            description="Allow all traffic"
        )

        app_target_group = elbv2.ApplicationTargetGroup(
            self, "AppTargetGroup",
            port=http_port,
            vpc=vpc,
            target_type=elbv2.TargetType.IP,
        )

        elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group,
        )

        app_listener = elbv2.ApplicationListener(
            self, "AppListener",
            load_balancer=elastic_loadbalancer,
            port=http_port,
            default_target_groups=[app_target_group],
        )

        task_definition = ecs.TaskDefinition(
            self, "TaskDefinition",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=task_def_cpu,
            memory_mib=task_def_memory_mb,
        )

        container_definition = ecs.ContainerDefinition(
            self, "ContainerDefinition",
            image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
            task_definition=task_definition,
            logging=ecs.AwsLogDriver(
                stream_prefix="DemoContainerLogs",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )

        container_definition.add_port_mappings(
            ecs.PortMapping(
                container_port=http_port,
            )
        )

        fargate_service = ecs.FargateService(
            self, "FargateService",
            task_definition=task_definition,
            cluster=cluster,
            security_group=security_group,
        )

        fargate_service.attach_to_application_target_group(
            target_group=app_target_group,
        )

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=elastic_loadbalancer.load_balancer_dns_name
        )
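
http_port, task_def_cpu and task_def_memory_mb are module-level constants omitted from the snippet. Note that in CDK v1 the generic ecs.TaskDefinition takes cpu and memory_mib as strings, unlike FargateTaskDefinition's integers; illustrative values:

http_port = 80               # illustrative
task_def_cpu = "512"         # ecs.TaskDefinition expects a string here
task_def_memory_mb = "1024"  # likewise a string (MiB)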
Example #10
    def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC
        vpc = ec2.Vpc(self, "TheVPC",
                      cidr="10.0.0.0/16",
                      nat_gateways=1,
                      )

        # IAM roles
        service_task_def_exe_role = iam.Role(
            self, "ServiceTaskDefExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
            )

        service_task_def_exe_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))

        service_task_def_role = iam.Role(
            self, 'ServiceTaskDefTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )

        # Fargate cluster
        cluster = ecs.Cluster(
            scope=self,
            id="ecs-cluster",
            cluster_name=config.ProjectName + "-" + config.stage,
            vpc=vpc
        )

        load_balancer = elbv2.ApplicationLoadBalancer(
            self, "load_balancer",
            vpc=vpc,
            internet_facing=True
        )

        # Security Group
        service_sg = ec2.SecurityGroup(self, "service_sg", vpc=vpc)
        service_sg.connections.allow_from(load_balancer, ec2.Port.tcp(80))

        # ECR Repo
        image_repo = ecr.Repository.from_repository_name(self, "image_repo",
                                                         repository_name=config.ProjectName
                                                         )

        log_group = logs.LogGroup(self, "log_group",
                                  log_group_name=config.ProjectName + "-" + config.stage,
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  retention=None
                                  )
        # ECS Task Def
        fargate_task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id="fargate_task_definition",
            cpu=1024,
            memory_limit_mib=2048,
            execution_role=service_task_def_exe_role,
            task_role=service_task_def_role,
            family=config.ProjectName + "-" + config.stage
        )

        container = fargate_task_definition.add_container(
            id="fargate_task_container",
            image=ecs.ContainerImage.from_ecr_repository(repository=image_repo, tag='release')
        )

        container.add_port_mappings(ecs.PortMapping(container_port=8080, host_port=8080, protocol=ecs.Protocol.TCP))

        # ECS Fargate Service
        fargate_service = ecs.FargateService(
            scope=self,
            id="fargate_service",
            security_group=service_sg,
            cluster=cluster,
            desired_count=2,
            deployment_controller=ecs.DeploymentController(type=ecs.DeploymentControllerType.ECS),
            task_definition=fargate_task_definition,
            service_name=config.ProjectName + "-" + config.stage
        )

        # Main Env
        listener_health_check_main = elbv2.HealthCheck(
            healthy_http_codes='200',
            interval=core.Duration.seconds(5),
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
            timeout=core.Duration.seconds(4)
        )
        # Test Env
        listener_health_check_test = elbv2.HealthCheck(
            healthy_http_codes='200',
            interval=core.Duration.seconds(5),
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
            timeout=core.Duration.seconds(4)
        )

        listener_main = load_balancer.add_listener("load_balancer_listener_1",
                                                   port=80,
                                                   )

        listener_main_targets = listener_main.add_targets("load_balancer_target_1", port=8080,
                                                          health_check=listener_health_check_main,
                                                          targets=[fargate_service]
                                                          )

        # Alarms: monitor 500s on target group
        aws_cloudwatch.Alarm(self, "TargetGroup5xx",
                             metric=listener_main_targets.metric_http_code_target(elbv2.HttpCodeTarget.TARGET_5XX_COUNT),
                             threshold=1,
                             evaluation_periods=1,
                             period=core.Duration.minutes(1)
                             )

        # Alarms: monitor unhealthy hosts on target group
        aws_cloudwatch.Alarm(self, "TargetGroupUnhealthyHosts",
                             metric=listener_main_targets.metric('UnHealthyHostCount'),
                             threshold=1,
                             evaluation_periods=1,
                             period=core.Duration.minutes(1)
                             )

        core.CfnOutput(self, "lburl",
                       value=load_balancer.load_balancer_dns_name,
                       export_name="LoadBalancerUrl"
                       )
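
ContainerPipelineConfiguration is passed in from the app entrypoint and only its ProjectName and stage attributes are used here. A minimal hypothetical definition:

class ContainerPipelineConfiguration:
    """Hypothetical; inferred from the config.ProjectName / config.stage reads above."""

    def __init__(self, project_name: str, stage: str) -> None:
        self.ProjectName = project_name
        self.stage = stage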
Example #11
    def __create_cloudmap_service(self, service_name: str, ctx: object):
        ctx_srv = getattr(ctx.inbound.services.cloudmap, service_name)

        ecs_task_role = self.__create_default_task_role(service_name)

        log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                            stream_prefix=service_name)

        # create a Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task_definition",
            cpu=ctx_srv.size.cpu,
            memory_limit_mib=ctx_srv.size.ram,
            execution_role=self.ecs_exec_role,
            task_role=ecs_task_role,
        )

        # create a container definition and associate with the Fargate task
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope=self,
            id=f"{service_name}_container_definition",
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                self.ecr_repository, "latest"),
            logging=log_driver,
            **container_vars)
        security_group = ec2.SecurityGroup(scope=self,
                                           id=f"{service_name}_sg",
                                           vpc=self.vpc)
        service = ecs.FargateService(scope=self,
                                     id=f"{service_name}_service",
                                     task_definition=task_definition,
                                     cluster=self.cluster,
                                     desired_count=getattr(
                                         ctx_srv, "desired_count",
                                         ctx.default_desired_count),
                                     service_name=service_name,
                                     security_group=security_group)

        for port in ctx_srv.ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.TCP))
            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.tcp(port),
                f"Logstash ingress for {service_name}")

        for port in ctx_srv.udp_ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.UDP))
            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.udp(port),
                f"Logstash ingress for {service_name}")

        scaling = service.auto_scale_task_count(
            max_capacity=ctx_srv.scaling.max_capacity,
            min_capacity=ctx_srv.scaling.min_capacity)

        scaling.scale_on_cpu_utilization(
            id="cpu_scaling",
            target_utilization_percent=ctx_srv.scaling.
            target_utilization_percent,
            scale_in_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_out_cooldown_seconds))

        cloudmap = awssd.PublicDnsNamespace.from_public_dns_namespace_attributes(
            scope=self,
            id="cloudmap_namespace",
            **ctx.inbound.namespace_props.dict())

        service.enable_cloud_map(cloud_map_namespace=cloudmap,
                                 dns_record_type=awssd.DnsRecordType.A,
                                 dns_ttl=core.Duration.seconds(15))
Example #12
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        dockerImageAsset = DockerImageAsset(
            self,
            f"{APPLICATION_NAME}",
            directory="../",
            file="Dockerfile",
            exclude=["cdk/node_modules", ".git", "cdk/cdk.out"],
        )

        vpc = ec2.Vpc(self, f"{APPLICATION_NAME}VPC", max_azs=3)
        cluster = ecs.Cluster(self, f"{APPLICATION_NAME}Cluster", vpc=vpc)

        app_task = ecs.FargateTaskDefinition(
            self,
            f"{APPLICATION_NAME}-task",
            family=f"{APPLICATION_NAME}-family",
            cpu=512,
            memory_limit_mib=2048,
        )

        dd_api_key = ssm.StringParameter.value_for_string_parameter(
            self, "/datadog/snyk_demo/dd_api_key", 1)

        DATADOG_AGENT_VARS["DD_API_KEY"] = dd_api_key

        java_service_container = app_task.add_container(
            f"{APPLICATION_NAME}-java-app",
            image=ecs.ContainerImage.from_docker_image_asset(dockerImageAsset),
            essential=True,
            docker_labels={
                "com.datadoghq.ad.instances":
                '[{"host": "%%host%%", "port": 6379}]',
                "com.datadoghq.ad.check_names": '["tomcat"]',
                "com.datadoghq.ad.init_configs": "[{}]",
            },
            environment=APP_ENV_VARS,
            logging=ecs.LogDrivers.firelens(
                options={
                    "Name": "datadog",
                    "Host": "http-intake.logs.datadoghq.com",
                    "TLS": "on",
                    "apikey": dd_api_key,
                    "dd_service": DD_SERVICE,
                    "dd_source": "tomcat",
                    "dd_tags": DD_TAGS,
                    "provider": "ecs",
                }),
        )

        datadog_agent_container = app_task.add_container(
            f"{APPLICATION_NAME}-datadog-agent",
            image=ecs.ContainerImage.from_registry(
                name="datadog/agent:latest"),
            essential=True,
            environment=DATADOG_AGENT_VARS,
        )

        # Port exposure for the containerized app
        java_service_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))

        # Mandatory port exposure for the Datadog agent
        datadog_agent_container.add_port_mappings(
            ecs.PortMapping(container_port=8126, host_port=8126))

        app_task_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            id=f"{APPLICATION_NAME}-service",
            service_name=f"{APPLICATION_NAME}",
            cluster=cluster,  # Required
            cpu=512,  # Default is 256
            desired_count=1,  # Default is 1
            task_definition=app_task,
            memory_limit_mib=2048,  # Default is 512
            listener_port=80,
            public_load_balancer=True,
            health_check_grace_period=core.Duration.seconds(120),
        )

        # Security Group to allow load balancer to communicate with ECS Containers.
        app_task_service.service.connections.allow_from_any_ipv4(
            ec2.Port.tcp(8080), f"{APPLICATION_NAME} app inbound")
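
APPLICATION_NAME, APP_ENV_VARS, DATADOG_AGENT_VARS, DD_SERVICE and DD_TAGS come from module scope and are not shown. Plausible illustrative values (the Datadog keys are real agent settings; the values are made up):

APPLICATION_NAME = "java-snyk-demo"
DD_SERVICE = "java-app"
DD_TAGS = "env:demo,team:platform"
APP_ENV_VARS = {"DD_ENV": "demo"}
DATADOG_AGENT_VARS = {
    "ECS_FARGATE": "true",  # required when the agent runs on Fargate
    "DD_SITE": "datadoghq.com",
    # DD_API_KEY is injected from SSM in the stack above
}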
Example #13
    def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnets = []
        subnets.append(
            aws_ec2.SubnetConfiguration(name="DeviceSubnet",
                                        subnet_type=aws_ec2.SubnetType.PUBLIC,
                                        cidr_mask=24))

        vpc = aws_ec2.Vpc(self,
                          "DeviceVpc",
                          max_azs=2,
                          subnet_configuration=subnets)

        # Iterate the private subnets
        selection = vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PUBLIC)

        sg = aws_ec2.SecurityGroup(
            self,
            id="FarGateSecGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description="Allow access to virtual device",
            security_group_name="Virtual Device Security Group")

        sg.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                            connection=aws_ec2.Port.tcp(80))

        rnd_suffix = create_random_name(4).lower()

        # pipeline requires versioned bucket
        bucket = aws_s3.Bucket(self,
                               "SourceBucket",
                               bucket_name="{}-{}-{}".format(
                                   props['namespace'].lower(),
                                   core.Aws.ACCOUNT_ID, core.Aws.REGION),
                               versioned=True,
                               removal_policy=core.RemovalPolicy.DESTROY)

        # ssm parameter to get bucket name later
        bucket_param = aws_ssm.StringParameter(
            self,
            "ParameterBucketName",
            parameter_name=f"{props['namespace']}-bucket",
            string_value=bucket.bucket_name,
            description='IoT playground pipeline bucket')

        # ecr repo to push docker container into
        ecr = aws_ecr.Repository(self,
                                 "ECR",
                                 repository_name=f"{props['namespace']}",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        # codebuild project meant to run in pipeline
        cb_docker_build = aws_codebuild.PipelineProject(
            self,
            "DockerBuild",
            project_name=f"{props['namespace']}-Docker-Build",
            build_spec=aws_codebuild.BuildSpec.from_source_filename(
                filename='docker/docker_build_buildspec.yml'),
            environment=aws_codebuild.BuildEnvironment(privileged=True),

            # pass the ecr repo uri into the codebuild project so codebuild knows where to push
            environment_variables={
                'ecr':
                aws_codebuild.BuildEnvironmentVariable(
                    value=ecr.repository_uri),
                'tag':
                aws_codebuild.BuildEnvironmentVariable(value='virtual_device')
            },
            description='Pipeline for CodeBuild',
            timeout=core.Duration.minutes(10),
        )
        # codebuild iam permissions to read write s3
        bucket.grant_read_write(cb_docker_build)

        # codebuild permissions to interact with ecr
        ecr.grant_pull_push(cb_docker_build)

        ecs_cluster = aws_ecs.Cluster(self, 'DeviceCluster', vpc=vpc)

        fargate_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'DeviceTaskDef',
            cpu=512,
            memory_limit_mib=1024
            #network_mode=aws_ecs.NetworkMode.AWS_VPC,
        )

        # fargate_task_def.add_to_task_role_policy(aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=[
        #         "s3:PutObject"],
        #     resources=["*"]
        # ))

        fargate_task_def.add_to_execution_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=[
                                        "ecr:GetAuthorizationToken",
                                        "ecr:BatchCheckLayerAvailability",
                                        "ecr:GetDownloadUrlForLayer",
                                        "ecr:BatchGetImage",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents"
                                    ],
                                    resources=["*"]))

        container_image = aws_ecs.EcrImage(repository=ecr,
                                           tag="virtual_device")

        logging = aws_ecs.AwsLogDriver(stream_prefix="virtual_device")

        container = fargate_task_def.add_container("DeviceContainer",
                                                   image=container_image,
                                                   cpu=512,
                                                   memory_limit_mib=1024,
                                                   logging=logging,
                                                   essential=True)

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # DynamoDB table acting as the device catalog
        table = aws_dynamodb.Table(self,
                                   "DeviceFactoryCatalog",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=3,
                                   write_capacity=3)

        function = aws_lambda.Function(
            self,
            "DeviceFactoryLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.from_asset("../lambdas/device_factory_lambda"),
            timeout=core.Duration.minutes(1))

        function.add_environment("BUCKET_NAME", bucket.bucket_name)
        function.add_environment("ECS_CLUSTER", ecs_cluster.cluster_name)
        function.add_environment("ECS_TASK_DEF",
                                 fargate_task_def.task_definition_arn)
        function.add_environment("DDB_TABLE_DEVICE_CATALOG", table.table_name)
        function.add_environment("SUBNET_1", selection.subnets[0].subnet_id)
        function.add_environment("SUBNET_2", selection.subnets[1].subnet_id)
        function.add_environment("SEC_GROUP", sg.security_group_id)

        table.grant_read_write_data(function)

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iot:*"],
                                    resources=["*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:PutObject", "s3:GetObject"],
                resources=["{}/*".format(bucket.bucket_arn)]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iam:PassRole"],
                                    resources=["arn:aws:iam::*:role/*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["ecs:RunTask", "ecs:StopTask"],
                                    resources=["*"]))

        api_gtw = aws_apigateway.LambdaRestApi(
            self,
            id="DeviceFactoryApi",
            rest_api_name="DeviceFactoryApi",
            handler=function)

        # ssm parameter to get api endpoint later
        endpoint_param = aws_ssm.StringParameter(
            self,
            "ParameterDeviceFactoryEndpoint",
            parameter_name=f"{props['namespace']}-devicefactoryendpoint",
            string_value=api_gtw.url,
            description='IoT playground device factory endpoint')

        # ssm parameter to get ecr uri later
        ecr_param = aws_ssm.StringParameter(
            self,
            "ParameterEcrUri",
            parameter_name=f"{props['namespace']}-ecruri",
            string_value=ecr.repository_uri,
            description='IoT playground ECR URI')

        # ssm parameter to get cluster name
        cluster_param = aws_ssm.StringParameter(
            self,
            "ParameterClusterName",
            parameter_name=f"{props['namespace']}-clustername",
            string_value=ecs_cluster.cluster_name,
            description='IoT playground Cluster Name')

        core.CfnOutput(
            self,
            "EcrUri",
            description="ECR URI",
            value=ecr.repository_uri,
        )

        core.CfnOutput(self,
                       "S3Bucket",
                       description="S3 Bucket",
                       value=bucket.bucket_name)

        core.CfnOutput(self,
                       "DeviceFactoryEndpoint",
                       description="Device Factory Endpoint",
                       value=api_gtw.url)

        self.output_props = props.copy()
        self.output_props['bucket'] = bucket
        self.output_props['cb_docker_build'] = cb_docker_build
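The stack above relies on a create_random_name helper that is not shown. A minimal sketch of what it could look like (hypothetical implementation; only the call site constrains it):

import random
import string


def create_random_name(length: int) -> str:
    # Return `length` random ASCII letters; the caller lowercases the result.
    return ''.join(random.choices(string.ascii_letters, k=length))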
Example No. 14
    def setup_monitoring(self):
        vpc = self.vpc
        sg = self.sg
        nlb = self.nlb
        with open("./user_data/prometheus.yml") as f:
            prometheus_config = f.read()

        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9090), 'prometheus')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9100), 'prometheus node exporter')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9091), 'prometheus pushgateway')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(3000), 'grafana')

        cluster = ecs.Cluster(self, "Monitoring", vpc=vpc)
        task = ecs.FargateTaskDefinition(self,
                                         id='MonitorTask',
                                         cpu=512,
                                         memory_limit_mib=2048)
        task.add_volume(name='prom_config')
        c_config = task.add_container('config-prometheus',
                                      image=ecs.ContainerImage.from_registry('bash'),
                                      essential=False,
                                      logging=ecs.LogDriver.aws_logs(stream_prefix="mon_config_prometheus",
                                                                     log_retention=aws_logs.RetentionDays.ONE_DAY),
                                      # decode the synth-time-encoded config into the shared volume
                                      command=["-c",
                                               "echo $DATA | base64 -d - | tee /tmp/private/prometheus.yml"],
                                      environment={'DATA': cdk.Fn.base64(prometheus_config)})
        c_config.add_mount_points(ecs.MountPoint(read_only=False, container_path='/tmp/private', source_volume='prom_config'))
        c_prometheus = task.add_container('prometheus',
                                          essential=False,
                                          image=ecs.ContainerImage.from_registry('prom/prometheus'),
                                          port_mappings=[ecs.PortMapping(container_port=9090)],
                                          command=[
                                              "--config.file=/etc/prometheus/private/prometheus.yml",
                                              "--storage.tsdb.path=/prometheus",
                                              "--web.console.libraries=/usr/share/prometheus/console_libraries",
                                              "--web.console.templates=/usr/share/prometheus/consoles"
                                          ],
                                          logging=ecs.LogDriver.aws_logs(stream_prefix="mon_prometheus",
                                                                         log_retention=aws_logs.RetentionDays.ONE_DAY))
        c_prometheus.add_mount_points(ecs.MountPoint(read_only=False, container_path='/etc/prometheus/private', source_volume='prom_config'))
        # wait for the config container to finish writing prometheus.yml
        c_prometheus.add_container_dependencies(ecs.ContainerDependency(container=c_config, condition=ecs.ContainerDependencyCondition.COMPLETE))


        c_pushgateway = task.add_container('pushgateway',
                                           essential=False,
                                           image=ecs.ContainerImage.from_registry('prom/pushgateway'),
                                           port_mappings=[ecs.PortMapping(container_port=9091)])

        c_grafana = task.add_container('grafana',
                                       essential=True,
                                       image=ecs.ContainerImage.from_registry('grafana/grafana'),
                                       port_mappings=[ecs.PortMapping(container_port=3000)])

        service = ecs.FargateService(self, "EMQXMonitoring",
                                     security_group=self.sg,
                                     cluster=cluster,
                                     task_definition=task,
                                     desired_count=1,
                                     assign_public_ip=True)

        listenerGrafana = nlb.add_listener('grafana', port=3000)
        listenerPrometheus = nlb.add_listener('prometheus', port=9090)
        listenerPushGateway = nlb.add_listener('pushgateway', port=9091)

        listenerGrafana.add_targets(id='grafana', port=3000, targets=[service.load_balancer_target(
            container_name="grafana",
            container_port=3000
        )])
        listenerPrometheus.add_targets(id='prometheus', port=9090, targets=[service.load_balancer_target(
            container_name="prometheus",
            container_port=9090
        )])
        listenerPushGateway.add_targets(id='pushgateway', port=9091, targets=[service.load_balancer_target(
            container_name="pushgateway",
            container_port=9091
        )])

        self.mon_lb = nlb.load_balancer_dns_name
        core.CfnOutput(self, "Monitoring Grafana",
                       value = "%s:%d" % (self.mon_lb, 3000))
        core.CfnOutput(self, "Monitoring Prometheus",
                       value = "%s:%d" % (self.mon_lb, 9090))
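The config-prometheus sidecar above injects the Prometheus config by base64-encoding it at synth time (cdk.Fn.base64) and decoding it at container start. The same round trip, sketched in plain Python to show the idea:

import base64

prometheus_config = 'global:\n  scrape_interval: 15s\n'

# What cdk.Fn.base64 produces at synth time...
encoded = base64.b64encode(prometheus_config.encode()).decode()

# ...and what `echo $DATA | base64 -d` recovers inside the container.
assert base64.b64decode(encoded).decode() == prometheus_config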
Example No. 15
    def __init__(self, scope: core.Stack, id: str, cluster, vpc, worker,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster
        self.vpc = vpc
        self.worker = worker

        # Building a custom image for jenkins master.
        self.container_image = ecr.DockerImageAsset(
            self, "JenkinsMasterDockerImage", directory='./docker/master/')

        if config['DEFAULT']['fargate_enabled'] == "yes" or config['DEFAULT']['ec2_enabled'] != "yes":
            # Task definition details to define the Jenkins master container
            self.jenkins_task = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                # image=ecs.ContainerImage.from_ecr_repository(self.container_image.repository),
                image=ecs.ContainerImage.from_docker_image_asset(
                    self.container_image),
                container_port=8080,
                enable_logging=True,
                environment={
                    # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                    'JAVA_OPTS':
                    '-Djenkins.install.runSetupWizard=false',
                    # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/README.md#getting-started
                    'CASC_JENKINS_CONFIG':
                    '/config-as-code.yaml',
                    'network_stack':
                    self.vpc.stack_name,
                    'cluster_stack':
                    self.cluster.stack_name,
                    'worker_stack':
                    self.worker.stack_name,
                    'cluster_arn':
                    self.cluster.cluster.cluster_arn,
                    'aws_region':
                    config['DEFAULT']['region'],
                    'jenkins_url':
                    config['DEFAULT']['jenkins_url'],
                    'subnet_ids':
                    ",".join(
                        [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                    'security_group_ids':
                    self.worker.worker_security_group.security_group_id,
                    'execution_role_arn':
                    self.worker.worker_execution_role.role_arn,
                    'task_role_arn':
                    self.worker.worker_task_role.role_arn,
                    'worker_log_group':
                    self.worker.worker_logs_group.log_group_name,
                    'worker_log_stream_prefix':
                    self.worker.worker_log_stream.log_stream_name
                },
            )

            # Create the Jenkins master service
            self.jenkins_master_service_main = ecs_patterns.ApplicationLoadBalancedFargateService(
                self,
                "JenkinsMasterService",
                cpu=int(config['DEFAULT']['fargate_cpu']),
                memory_limit_mib=int(
                    config['DEFAULT']['fargate_memory_limit_mib']),
                cluster=self.cluster.cluster,
                desired_count=1,
                enable_ecs_managed_tags=True,
                task_image_options=self.jenkins_task,
                cloud_map_options=ecs.CloudMapOptions(
                    name="master", dns_record_type=sd.DnsRecordType.A))

            self.jenkins_master_service = self.jenkins_master_service_main.service
            self.jenkins_master_task = self.jenkins_master_service.task_definition

        if config['DEFAULT']['ec2_enabled'] == "yes":
            self.jenkins_load_balancer = elb.ApplicationLoadBalancer(
                self,
                "JenkinsMasterELB",
                vpc=self.vpc.vpc,
                internet_facing=True,
            )

            self.listener = self.jenkins_load_balancer.add_listener("Listener",
                                                                    port=80)

            self.jenkins_master_task = ecs.Ec2TaskDefinition(
                self,
                "JenkinsMasterTaskDef",
                network_mode=ecs.NetworkMode.AWS_VPC,
                volumes=[
                    ecs.Volume(name="efs_mount",
                               host=ecs.Host(source_path='/mnt/efs'))
                ],
            )

            self.jenkins_master_task.add_container(
                "JenkinsMasterContainer",
                image=ecs.ContainerImage.from_ecr_repository(
                    self.container_image.repository),
                cpu=int(config['DEFAULT']['ec2_cpu']),
                memory_limit_mib=int(
                    config['DEFAULT']['ec2_memory_limit_mib']),
                environment={
                    # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                    'JAVA_OPTS':
                    '-Djenkins.install.runSetupWizard=false',
                    'CASC_JENKINS_CONFIG':
                    '/config-as-code.yaml',
                    'network_stack':
                    self.vpc.stack_name,
                    'cluster_stack':
                    self.cluster.stack_name,
                    'worker_stack':
                    self.worker.stack_name,
                    'cluster_arn':
                    self.cluster.cluster.cluster_arn,
                    'aws_region':
                    config['DEFAULT']['region'],
                    'jenkins_url':
                    config['DEFAULT']['jenkins_url'],
                    'subnet_ids':
                    ",".join(
                        [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                    'security_group_ids':
                    self.worker.worker_security_group.security_group_id,
                    'execution_role_arn':
                    self.worker.worker_execution_role.role_arn,
                    'task_role_arn':
                    self.worker.worker_task_role.role_arn,
                    'worker_log_group':
                    self.worker.worker_logs_group.log_group_name,
                    'worker_log_stream_prefix':
                    self.worker.worker_log_stream.log_stream_name
                },
                logging=ecs.LogDriver.aws_logs(
                    stream_prefix="JenkinsMaster",
                    log_retention=logs.RetentionDays.ONE_WEEK),
            )

            self.jenkins_master_task.default_container.add_mount_points(
                ecs.MountPoint(container_path='/var/jenkins_home',
                               source_volume="efs_mount",
                               read_only=False))

            self.jenkins_master_task.default_container.add_port_mappings(
                ecs.PortMapping(container_port=8080, host_port=8080))

            self.jenkins_master_service = ecs.Ec2Service(
                self,
                "EC2MasterService",
                task_definition=self.jenkins_master_task,
                cloud_map_options=ecs.CloudMapOptions(
                    name="master", dns_record_type=sd.DnsRecordType.A),
                desired_count=1,
                min_healthy_percent=0,
                max_healthy_percent=100,
                enable_ecs_managed_tags=True,
                cluster=self.cluster.cluster,
            )

            self.target_group = self.listener.add_targets(
                "JenkinsMasterTarget",
                port=80,
                targets=[
                    self.jenkins_master_service.load_balancer_target(
                        container_name=self.jenkins_master_task.
                        default_container.container_name,
                        container_port=8080,
                    )
                ],
                deregistration_delay=core.Duration.seconds(10))

        # Opening port 50000 for master <--> worker communications
        self.jenkins_master_service.task_definition.default_container.add_port_mappings(
            ecs.PortMapping(container_port=50000, host_port=50000))

        # Enable connection between Master and Worker
        self.jenkins_master_service.connections.allow_from(
            other=self.worker.worker_security_group,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='Master to Worker 50000',
                                from_port=50000,
                                to_port=50000))

        # Enable connection between Master and Worker on 8080
        self.jenkins_master_service.connections.allow_from(
            other=self.worker.worker_security_group,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='Master to Worker 8080',
                                from_port=8080,
                                to_port=8080))

        # IAM Statements to allow jenkins ecs plugin to talk to ECS as well as the Jenkins cluster #
        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=[
                    "ecs:RegisterTaskDefinition",
                    "ecs:DeregisterTaskDefinition", "ecs:ListClusters",
                    "ecs:DescribeContainerInstances",
                    "ecs:ListTaskDefinitions", "ecs:DescribeTaskDefinition",
                    "ecs:DescribeTasks"
                ],
                resources=["*"],
            ))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["ecs:ListContainerInstances"],
                                resources=[self.cluster.cluster.cluster_arn]))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=["ecs:RunTask"],
                resources=[
                    "arn:aws:ecs:{0}:{1}:task-definition/fargate-workers*".
                    format(
                        self.region,
                        self.account,
                    )
                ]))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["ecs:StopTask"],
                                resources=[
                                    "arn:aws:ecs:{0}:{1}:task/*".format(
                                        self.region, self.account)
                                ],
                                conditions={
                                    "ForAnyValue:ArnEquals": {
                                        "ecs:cluster":
                                        self.cluster.cluster.cluster_arn
                                    }
                                }))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["iam:PassRole"],
                                resources=[
                                    self.worker.worker_task_role.role_arn,
                                    self.worker.worker_execution_role.role_arn
                                ]))
        # END OF JENKINS ECS PLUGIN IAM POLICIES #
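        # Note: the statement below grants actions=["*"] on the worker log group;
        # scoping it to specific logs:* actions would be tighter.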
        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=["*"],
                resources=[self.worker.worker_logs_group.log_group_arn]))
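The Jenkins stack above reads all of its settings from a config object with a DEFAULT section. A minimal sketch of a matching file loaded with configparser; the key names come from the code, the values are placeholders:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[DEFAULT]
fargate_enabled = yes
ec2_enabled = no
region = us-east-1
jenkins_url = http://jenkins.example.com
fargate_cpu = 1024
fargate_memory_limit_mib = 2048
ec2_cpu = 1024
ec2_memory_limit_mib = 2048
""")

assert config['DEFAULT']['fargate_enabled'] == "yes"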
Example No. 16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # ==============================
        # ======= CFN PARAMETERS =======
        # ==============================
        project_name_param = core.CfnParameter(scope=self, id='ProjectName', type='String')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = f'{project_name_param.value_as_string}-artifacts-{core.Aws.ACCOUNT_ID}'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # ==================================================
        # ================= IAM ROLE =======================
        # ==================================================
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # ==================================================
        # ================== SECRET ========================
        # ==================================================
        db_password_secret = sm.Secret(
            scope=self,
            id='DBSECRET',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )
        

        # ==================================================
        # ==================== VPC =========================
        # ==================================================
        #public_subnet = ec2.SubnetConfiguration(name='Public', subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=28)
        #dev-shared-public-subnet-az1
        #private_subnet = ec2.SubnetConfiguration(name='Private', subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=28)
        #dev-shared-private-subnet-az1
        #isolated_subnet = ec2.SubnetConfiguration(name='DB', subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=28) 
        #dev-shared-private-subnet-az1

        # use existing VPC (needed later for Fargate)
        """ vpc = ec2.Vpc(
            scope=self,
            id='VPC',
            cidr='10.0.0.0/24',
            max_azs=2,
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=1,
            subnet_configuration=[public_subnet, private_subnet, isolated_subnet]
        ) """

        """ stack = MyStack(
            app, "MyStack", env=Environment(account="account_id", region="region")
        ) """

        vpc = ec2.Vpc.from_lookup(self, "VPC",
            vpc_id="vpc-03076add1b1efca31"  # is_default=True
        )  # TODO: fill in correct arguments

        # leave as-is, should be fine; if not, check (it is not a NAT gateway)

        #original: vpc.add_gateway_endpoint('S3Endpoint', service=ec2.GatewayVpcEndpointAwsService.S3)
        # ==================================================
        # ================= S3 BUCKET ======================
        # ==================================================
        artifact_bucket = s3.Bucket(
            scope=self,
            id='ARTIFACTBUCKET',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # # ==================================================
        # # ================== DATABASE  =====================
        # # ==================================================
        # Creates a security group for AWS RDS
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', vpc=vpc, security_group_name='sg_rds')
        # Adds an ingress rule which allows resources in the VPC's CIDR to access the database.
        #original: sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.0.0.0/24'), connection=ec2.Port.tcp(port))
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.206.192.0/19'), connection=ec2.Port.tcp(port))
        #10.206.192.0/19

        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            security_groups=[sg_rds],
            #vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE), #TODO: check if you need to select private here and how
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )
        # ==================================================
        # =============== FARGATE SERVICE ==================
        # ==================================================
        
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name, vpc=vpc)

        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )

        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='container',
                repository_name=container_repo_name
            ),
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Setup security group
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound from VPC for mlflow'
        )

        # Setup autoscaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
        # ==================================================
        # =================== OUTPUTS ======================
        # ==================================================
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
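Because this stack resolves its VPC with ec2.Vpc.from_lookup, it has to be synthesized with an explicit account and region. A minimal sketch of an app entry point, assuming the stack class above is called MLflowStack (account and region values are placeholders):

from aws_cdk import core

app = core.App()
MLflowStack(app, 'MLflowStack',
            env=core.Environment(account='111111111111', region='eu-west-1'))
app.synth()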
Example No. 17
    def createResources(self, ns):

        # Security Group Updates
        albsg = self.bentoALB.connections.security_groups[0]
        self.ecssg = self.bentoECS_ASG.connections.security_groups[0]

        # This boto3 lookup runs at synth time, so `cdk synth` needs AWS
        # credentials that can describe security groups in the target account.
        botoec2 = boto3.client('ec2')
        group_name = 'bento-bastion-sg'
        response = botoec2.describe_security_groups(
            Filters=[dict(Name='group-name', Values=[group_name])])
        bastion_group_id = response['SecurityGroups'][0]['GroupId']
        self.bastionsg = ec2.SecurityGroup.from_security_group_id(
            self, 'bastion-security-group', security_group_id=bastion_group_id)

        self.ecssg.add_ingress_rule(
            albsg,
            ec2.Port.tcp(int(self.config[ns]['backend_container_port'])))
        self.ecssg.add_ingress_rule(
            albsg,
            ec2.Port.tcp(int(self.config[ns]['frontend_container_port'])))
        self.ecssg.add_ingress_rule(self.bastionsg, ec2.Port.tcp(22))

        # Backend Task Definition
        backendECSTask = ecs.Ec2TaskDefinition(
            self, "bento-ecs-backend", network_mode=ecs.NetworkMode.AWS_VPC)

        backendECSContainer = backendECSTask.add_container(
            'api',
            image=ecs.ContainerImage.from_registry(
                "cbiitssrepo/bento-backend:latest"),
            memory_reservation_mib=1024,
            cpu=512)

        backend_port_mapping = ecs.PortMapping(
            container_port=int(self.config[ns]['backend_container_port']),
            host_port=int(self.config[ns]['backend_container_port']),
            protocol=ecs.Protocol.TCP)

        backendECSContainer.add_port_mappings(backend_port_mapping)

        # Backend Service
        self.backendService = ecs.Ec2Service(
            self,
            "{}-backend".format(ns),
            service_name="{}-backend".format(ns),
            task_definition=backendECSTask,
            cluster=self.bentoECS)

        # Frontend Task Definition
        frontendECSTask = ecs.Ec2TaskDefinition(
            self, "bento-ecs-frontend", network_mode=ecs.NetworkMode.AWS_VPC)

        frontendECSContainer = frontendECSTask.add_container(
            'ui',
            image=ecs.ContainerImage.from_registry(
                "cbiitssrepo/bento-frontend:latest"),
            memory_reservation_mib=1024,
            cpu=512)

        frontend_port_mapping = ecs.PortMapping(
            container_port=int(self.config[ns]['frontend_container_port']),
            host_port=int(self.config[ns]['frontend_container_port']),
            protocol=ecs.Protocol.TCP)

        frontendECSContainer.add_port_mappings(frontend_port_mapping)

        # Frontend Service
        self.frontendService = ecs.Ec2Service(
            self,
            "{}-frontend".format(ns),
            service_name="{}-frontend".format(ns),
            task_definition=frontendECSTask,
            cluster=self.bentoECS)
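Like the Jenkins example, this stack pulls its ports from a config object, here keyed by a namespace ns. A minimal sketch of a matching section; the section name and values are placeholders, only the key names come from the code:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[bento]
backend_container_port = 8080
frontend_container_port = 80
""")

ns = 'bento'
assert int(config[ns]['backend_container_port']) == 8080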
Example No. 18
    def create_fagate_NLB_autoscaling_custom(self, vpc, **kwargs):
        ####################
        # Unpack Value for name/ecr_repo
        app_name = kwargs['function'].replace("_", "-")
        task_name = "{}-task-definition".format(app_name)
        log_name = app_name
        image_name = "{}-image".format(app_name)
        container_name = "{}-container".format(app_name)
        service_name = "{}-service".format(app_name)

        app_ecr = kwargs['ecr']
        ecs_role = kwargs['ecs_role']

        ####################
        # Create Cluster
        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        ####################
        # Config IAM Role
        # add managed policy statement
        # ecs_base_role = iam.Role(
        #     self,
        #     "ecs_service_role",
        #     assumed_by=iam.ServicePrincipal("ecs.amazonaws.com")
        # )
        # ecs_role = ecs_base_role.from_role_arn(self, 'gw-ecr-role-test', role_arn='arn:aws:iam::002224604296:role/ecsTaskExecutionRole')

        ####################
        # Create Fargate Task Definition
        fargate_task = ecs.FargateTaskDefinition(self,
                                                 task_name,
                                                 execution_role=ecs_role,
                                                 task_role=ecs_role,
                                                 cpu=2048,
                                                 memory_limit_mib=8192)
        # 0. config log
        ecs_log = ecs.LogDrivers.aws_logs(stream_prefix=log_name)
        # 1. prepare ecr repository
        ecr_repo = ecr.Repository.from_repository_name(self,
                                                       id=image_name,
                                                       repository_name=app_ecr)
        fargate_container = fargate_task.add_container(
            container_name,
            image=ecs.ContainerImage.from_ecr_repository(ecr_repo),
            logging=ecs_log,
            environment={
                'KG_PATH': "s3://autorec-1",
                "REDIS_URL": self.redis_host,
                "REDIS_PORT": self.redis_port
            })
        # 2. config port mapping
        port_mapping = ecs.PortMapping(container_port=9008,
                                       host_port=9008,
                                       protocol=ecs.Protocol.TCP)
        fargate_container.add_port_mappings(port_mapping)

        ####################
        # Config NLB service
        # fargate_service = ecs.FargateService(self, 'graph-inference-service',
        #     cluster=cluster, task_definition=fargate_task, assign_public_ip=True
        # )
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            service_name,
            cluster=cluster,
            task_definition=fargate_task,
            assign_public_ip=True,
            desired_count=20,
            listener_port=9008)
        # 0. allow inbound in sg
        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(
                # peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
                peer=ec2.Peer.ipv4('0.0.0.0/0'),
                connection=ec2.Port.tcp(9008),
                description="Allow http inbound from VPC")

        # 1. setup autoscaling policy
        # autoscaling: scale the task count automatically
        # scaling = fargate_service.service.auto_scale_task_count(
        #     max_capacity=50
        # )
        # scaling.scale_on_cpu_utilization(
        #     "CpuScaling",
        #     target_utilization_percent=50,
        #     scale_in_cooldown=core.Duration.seconds(60),
        #     scale_out_cooldown=core.Duration.seconds(60),
        # )

        return fargate_service.load_balancer.load_balancer_dns_name
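Example No. 19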
class MLflowStack(core.Stack):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        ##
        ## General parameters used for infrastructure provisioning
        ##
        project_name_param = core.CfnParameter(scope=self, id='mlflowStack', type='String', default='mlflowStack')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = 'mlflowbucket-track-stack'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # Attach the managed policies to the role that will be assigned to the ECS task.
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # Secrets Manager secret storing the password of our RDS MySQL instance
        db_password_secret = sm.Secret(
            scope=self,
            id='dbsecret',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )

        # Create the S3 bucket
        artifact_bucket = s3.Bucket(
            scope=self,
            id='mlflowstacktrack',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Look up the VPC that the RDS instance will be attached to
        dev_vpc = ec2.Vpc.from_vpc_attributes(
            self, '<VPC_NAME>',
            vpc_id="<VPC_ID>",
            availability_zones=core.Fn.get_azs(),
            private_subnet_ids=["PRIVATE_SUBNET_ID_1", "PRIVATE_SUBNET_ID_2", "PRIVATE_SUBNET_ID_3"]
        )

        # For testing purposes we allow 0.0.0.0/0 here
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', security_group_name='sg_rds', vpc=dev_vpc)
        
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'), connection=ec2.Port.tcp(port))

        # Create the RDS instance
        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),            
            security_groups=[sg_rds],
            vpc=dev_vpc,            
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )

        # Create the ECS cluster (note: no vpc is passed, so CDK creates a new VPC for it)
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name)
        # Task definition for Fargate
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )
        # Build our container from the MLflow Dockerfile
        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='../MLflow/container',
                repository_name=container_repo_name
            ),
            # Environment variables for the container
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            # Secret containing the RDS MySQL password
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        # Port mapping to expose the MLflow container
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Security group rule for ingress
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4('0.0.0.0/0'),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound for mlflow'
        )

        # Auto scaling policy for our load-balanced service
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
     
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
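Once deployed, the LoadBalancerDNS output is what MLflow clients point at. A minimal usage sketch; the DNS name is a placeholder, and the NLB listens on port 80 by default for this pattern:

import mlflow

mlflow.set_tracking_uri('http://<LoadBalancerDNS>')
mlflow.set_experiment('demo')

with mlflow.start_run():
    mlflow.log_param('alpha', 0.5)
    mlflow.log_metric('rmse', 0.42)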
Example No. 20
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: aws_ec2.Vpc,
                 ecs_cluster: aws_ecs.Cluster,
                 alb: elbv2.ApplicationLoadBalancer,
                 albTestListener: elbv2.ApplicationListener,
                 albProdListener: elbv2.ApplicationListener,
                 blueGroup: elbv2.ApplicationTargetGroup,
                 greenGroup: elbv2.ApplicationTargetGroup,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ECS_DEPLOYMENT_GROUP_NAME = "NginxAppECSBlueGreen"
        ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes"
        ECS_TASKSET_TERMINATION_WAIT_TIME = 10
        ECS_TASK_FAMILY_NAME = "Nginx-microservice"
        ECS_APP_NAME = "Nginx-microservice"
        ECS_APP_LOG_GROUP_NAME = "/ecs/Nginx-microservice"
        DUMMY_TASK_FAMILY_NAME = "sample-Nginx-microservice"
        DUMMY_APP_NAME = "sample-Nginx-microservice"
        DUMMY_APP_LOG_GROUP_NAME = "/ecs/sample-Nginx-microservice"
        DUMMY_CONTAINER_IMAGE = "smuralee/nginx"

        # =============================================================================
        # ECR and CodeCommit repositories for the Blue/ Green deployment
        # =============================================================================

        # ECR repository for the docker images
        NginxecrRepo = aws_ecr.Repository(self,
                                          "NginxRepo",
                                          image_scan_on_push=True)

        NginxCodeCommitrepo = aws_codecommit.Repository(
            self,
            "NginxRepository",
            repository_name=ECS_APP_NAME,
            description="Oussama application hosted on NGINX")

        # =============================================================================
        #   CODE BUILD and ECS TASK ROLES for the Blue/ Green deployment
        # =============================================================================

        # IAM role for the Code Build project
        codeBuildServiceRole = aws_iam.Role(
            self,
            "codeBuildServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com'))

        inlinePolicyForCodeBuild = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:InitiateLayerUpload", "ecr:UploadLayerPart",
                "ecr:CompleteLayerUpload", "ecr:PutImage"
            ],
            resources=["*"])

        codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild)

        # ECS task role
        ecsTaskRole = aws_iam.Role(
            self,
            "ecsTaskRoleForWorkshop",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))

        # =============================================================================
        # CODE DEPLOY APPLICATION for the Blue/ Green deployment
        # =============================================================================

        # Creating the code deploy application
        codeDeployApplication = codedeploy.EcsApplication(
            self, "NginxAppCodeDeploy")

        # Creating the code deploy service role
        codeDeployServiceRole = aws_iam.Role(
            self,
            "codeDeployServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com'))
        codeDeployServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"))

        # IAM role for custom lambda function
        customLambdaServiceRole = aws_iam.Role(
            self,
            "codeDeployCustomLambda",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        inlinePolicyForLambda = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codedeploy:List*",
                "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup",
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup"
            ],
            resources=["*"])

        customLambdaServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        customLambdaServiceRole.add_to_policy(inlinePolicyForLambda)

        # Custom resource to create the deployment group
        createDeploymentGroupLambda = aws_lambda.Function(
            self,
            'createDeploymentGroupLambda',
            code=aws_lambda.Code.from_asset("custom_resources"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='create_deployment_group.handler',
            role=customLambdaServiceRole,
            description="Custom resource to create deployment group",
            memory_size=128,
            timeout=core.Duration.seconds(60))

        # ================================================================================================
        # CloudWatch Alarms for 4XX errors
        blue4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": blueGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))

        blueGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "blue4xxErrors",
            alarm_name="Blue_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Blue target group",
            metric=blue4xxMetric,
            threshold=1,
            evaluation_periods=1)

        green4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": greenGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))
        greenGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "green4xxErrors",
            alarm_name="Green_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Green target group",
            metric=green4xxMetric,
            threshold=1,
            evaluation_periods=1)

        # ================================================================================================
        # DUMMY TASK DEFINITION for the initial service creation
        # This is required for the service being made available to create the CodeDeploy Deployment Group
        # ================================================================================================
        sampleTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "sampleTaskDefn",
            family=DUMMY_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        sampleContainerDefn = sampleTaskDefinition.add_container(
            "sampleAppContainer",
            image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "sampleAppLogGroup",
                log_group_name=DUMMY_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=DUMMY_APP_NAME),
            docker_labels={"name": DUMMY_APP_NAME})

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        sampleContainerDefn.add_port_mappings(port_mapping)

        # ================================================================================================
        # ECS task definition using ECR image
        # Will be used by the CODE DEPLOY for Blue/Green deployment
        # ================================================================================================
        NginxTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "appTaskDefn",
            family=ECS_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        NginxcontainerDefinition = NginxTaskDefinition.add_container(
            "NginxAppContainer",
            image=aws_ecs.ContainerImage.from_ecr_repository(
                NginxecrRepo, "latest"),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "NginxAppLogGroup",
                log_group_name=ECS_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=ECS_APP_NAME),
            docker_labels={"name": ECS_APP_NAME})
        NginxcontainerDefinition.add_port_mappings(port_mapping)

        # =============================================================================
        # ECS SERVICE for the Blue/ Green deployment
        # =============================================================================
        NginxAppService = aws_ecs.FargateService(
            self,
            "NginxAppService",
            cluster=ecs_cluster,
            task_definition=NginxTaskDefinition,
            health_check_grace_period=core.Duration.seconds(10),
            desired_count=3,
            deployment_controller={
                "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY
            },
            service_name=ECS_APP_NAME)

        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(80))
        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(8080))
        NginxAppService.attach_to_application_target_group(blueGroup)

        # =============================================================================
        # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/ Green deployment
        # =============================================================================

        core.CustomResource(
            self,
            'customEcsDeploymentGroup',
            service_token=createDeploymentGroupLambda.function_arn,
            properties={
                "ApplicationName": codeDeployApplication.application_name,
                "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME,
                "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME,
                "ServiceRoleArn": codeDeployServiceRole.role_arn,
                "BlueTargetGroup": blueGroup.target_group_name,
                "GreenTargetGroup": greenGroup.target_group_name,
                "ProdListenerArn": albProdListener.listener_arn,
                "TestListenerArn": albTestListener.listener_arn,
                "EcsClusterName": ecs_cluster.cluster_name,
                "EcsServiceName": NginxAppService.service_name,
                "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME,
                "BlueGroupAlarm": blueGroupAlarm.alarm_name,
                "GreenGroupAlarm": greenGroupAlarm.alarm_name,
            })
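        # The deployment group itself is created by the custom-resource Lambda
        # referenced above (custom_resources/create_deployment_group.py). A
        # much-simplified sketch of such a handler using boto3 is shown below;
        # a real handler must also send a success/failure response back to
        # CloudFormation, which is omitted here:
        #
        #     import boto3
        #
        #     codedeploy = boto3.client('codedeploy')
        #
        #     def handler(event, context):
        #         props = event['ResourceProperties']
        #         if event['RequestType'] == 'Create':
        #             codedeploy.create_deployment_group(
        #                 applicationName=props['ApplicationName'],
        #                 deploymentGroupName=props['DeploymentGroupName'],
        #                 deploymentConfigName=props['DeploymentConfigName'],
        #                 serviceRoleArn=props['ServiceRoleArn'],
        #                 deploymentStyle={'deploymentType': 'BLUE_GREEN',
        #                                  'deploymentOption': 'WITH_TRAFFIC_CONTROL'},
        #                 ecsServices=[{'serviceName': props['EcsServiceName'],
        #                               'clusterName': props['EcsClusterName']}],
        #                 loadBalancerInfo={'targetGroupPairInfoList': [{
        #                     'targetGroups': [{'name': props['BlueTargetGroup']},
        #                                      {'name': props['GreenTargetGroup']}],
        #                     'prodTrafficRoute': {'listenerArns': [props['ProdListenerArn']]},
        #                     'testTrafficRoute': {'listenerArns': [props['TestListenerArn']]},
        #                 }]})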

        ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
            self,
            "ecsDeploymentGroup",
            application=codeDeployApplication,
            deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME,
            deployment_config=codedeploy.EcsDeploymentConfig.
            from_ecs_deployment_config_name(self, "ecsDeploymentConfig",
                                            ECS_DEPLOYMENT_CONFIG_NAME))

        # =============================================================================
        # CODE BUILD PROJECT for the Blue/ Green deployment
        # =============================================================================

        # Creating the code build project
        NginxAppcodebuild = aws_codebuild.Project(
            self,
            "NginxAppCodeBuild",
            role=codeBuildServiceRole,
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0,
                compute_type=aws_codebuild.ComputeType.SMALL,
                privileged=True,
                environment_variables={
                    'REPOSITORY_URI': {
                        'value':
                        NginxecrRepo.repository_uri,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_EXECUTION_ARN': {
                        'value':
                        ecsTaskRole.role_arn,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_FAMILY': {
                        'value':
                        ECS_TASK_FAMILY_NAME,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    }
                }),
            source=aws_codebuild.Source.code_commit(
                repository=NginxCodeCommitrepo))

        # =============================================================================
        # CODE PIPELINE for Blue/Green ECS deployment
        # =============================================================================

        codePipelineServiceRole = aws_iam.Role(
            self,
            "codePipelineServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com'))

        inlinePolicyForCodePipeline = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codecommit:Get*",
                "codecommit:List*", "codecommit:GitPull",
                "codecommit:UploadArchive", "codecommit:CancelUploadArchive",
                "codebuild:BatchGetBuilds", "codebuild:StartBuild",
                "codedeploy:CreateDeployment", "codedeploy:Get*",
                "codedeploy:RegisterApplicationRevision", "s3:Get*",
                "s3:List*", "s3:PutObject"
            ],
            resources=["*"])

        codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline)

        sourceArtifact = codepipeline.Artifact('sourceArtifact')
        buildArtifact = codepipeline.Artifact('buildArtifact')

        # S3 bucket for storing the code pipeline artifacts
        NginxAppArtifactsBucket = s3.Bucket(
            self,
            "NginxAppArtifactsBucket",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # S3 bucket policy for the code pipeline artifacts
        denyUnEncryptedObjectUploads = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:PutObject"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "aws:kms"
                }
            })

        denyInsecureConnections = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={"Bool": {
                "aws:SecureTransport": "false"
            }})

        NginxAppArtifactsBucket.add_to_resource_policy(
            denyUnEncryptedObjectUploads)
        NginxAppArtifactsBucket.add_to_resource_policy(denyInsecureConnections)

        # Code Pipeline - CloudWatch trigger event is created by CDK
        codepipeline.Pipeline(
            self,
            "ecsBlueGreen",
            role=codePipelineServiceRole,
            artifact_bucket=NginxAppArtifactsBucket,
            stages=[
                codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.CodeCommitSourceAction(
                            action_name='Source',
                            repository=NginxCodeCommitrepo,
                            output=sourceArtifact,
                        )
                    ]),
                codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='Build',
                            project=NginxAppcodebuild,
                            input=sourceArtifact,
                            outputs=[buildArtifact])
                    ]),
                codepipeline.StageProps(
                    stage_name='Deploy',
                    actions=[
                        aws_codepipeline_actions.CodeDeployEcsDeployAction(
                            action_name='Deploy',
                            deployment_group=ecsDeploymentGroup,
                            app_spec_template_input=buildArtifact,
                            task_definition_template_input=buildArtifact,
                        )
                    ])
            ])
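        # Note: CodeDeployEcsDeployAction defaults to reading appspec.yaml and
        # taskdef.json from its input artifact, so the Build stage above must
        # emit both files into buildArtifact.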

        # =============================================================================
        # Export the outputs
        # =============================================================================
        core.CfnOutput(self,
                       "ecsBlueGreenCodeRepo",
                       description="Demo app code commit repository",
                       export_name="ecsBlueGreenDemoAppRepo",
                       value=NginxCodeCommitrepo.repository_clone_url_http)

        core.CfnOutput(self,
                       "ecsBlueGreenLBDns",
                       description="Load balancer DNS",
                       export_name="ecsBlueGreenLBDns",
                       value=alb.load_balancer_dns_name)
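
The asset behind createDeploymentGroupLambda is not shown in this example. As a rough sketch only - assuming the handler simply forwards the custom resource's properties to the boto3 CodeDeploy API - its create path might look like this (signalling the result back to CloudFormation is omitted):

# Hypothetical sketch of the custom-resource handler, not the original asset.
import boto3

codedeploy = boto3.client('codedeploy')


def lambda_handler(event, context):
    props = event['ResourceProperties']
    if event['RequestType'] == 'Create':
        codedeploy.create_deployment_group(
            applicationName=props['ApplicationName'],
            deploymentGroupName=props['DeploymentGroupName'],
            deploymentConfigName=props['DeploymentConfigName'],
            serviceRoleArn=props['ServiceRoleArn'],
            deploymentStyle={
                'deploymentType': 'BLUE_GREEN',
                'deploymentOption': 'WITH_TRAFFIC_CONTROL',
            },
            blueGreenDeploymentConfiguration={
                'terminateBlueInstancesOnDeploymentSuccess': {
                    'action': 'TERMINATE',
                    'terminationWaitTimeInMinutes':
                    int(props['TerminationWaitTime']),
                },
                'deploymentReadyOption': {
                    'actionOnTimeout': 'CONTINUE_DEPLOYMENT'
                },
            },
            loadBalancerInfo={
                'targetGroupPairInfoList': [{
                    'targetGroups': [{'name': props['BlueTargetGroup']},
                                     {'name': props['GreenTargetGroup']}],
                    'prodTrafficRoute': {
                        'listenerArns': [props['ProdListenerArn']]
                    },
                    'testTrafficRoute': {
                        'listenerArns': [props['TestListenerArn']]
                    },
                }],
            },
            alarmConfiguration={
                'enabled': True,
                'alarms': [{'name': props['BlueGroupAlarm']},
                           {'name': props['GreenGroupAlarm']}],
            },
            ecsServices=[{
                'clusterName': props['EcsClusterName'],
                'serviceName': props['EcsServiceName'],
            }])
    # 'Update' and 'Delete' request types would be handled similarly.
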
Example No. 21
0
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(self, "PirateVpc", max_azs=1)

        cluster = ecs.Cluster(self,
                              "PirateCluster",
                              container_insights=True,
                              vpc=vpc)

        cluster.add_capacity(
            'Shipyard',
            block_devices=[
                autoscaling.BlockDevice(
                    device_name='/dev/xvda',
                    volume=autoscaling.BlockDeviceVolume.ebs(
                        volume_size=1000))  # 1 TB
            ],
            instance_type=ec2.InstanceType('m4.4xlarge'))

        task_definition = ecs.Ec2TaskDefinition(
            self,
            'PirateTask',
            family='eth2',
            volumes=[
                ecs.Volume(
                    name='v',
                    docker_volume_configuration=ecs.DockerVolumeConfiguration(
                        driver='local',
                        scope=ecs.Scope.SHARED,  # shared, so the volume persists beyond the lifetime of the task
                        autoprovision=True))
            ])

        container = task_definition.add_container(
            'barbosa',
            image=ecs.ContainerImage.from_registry(
                'sigp/lighthouse'),  # TODO: configurable
            command=[
                # each CLI token must be its own list element
                '--network', 'pyrmont', 'beacon', '--http',
                '--http-address', '0.0.0.0'
            ],
            cpu=4 * 1024,  # 4vCPU -> 8-30GB memory
            container_name='Pirate',
            logging=ecs.LogDrivers.aws_logs(stream_prefix='pirate'),
            memory_reservation_mib=16 * 1024,  # 16GB
            port_mappings=[
                ecs.PortMapping(container_port=9000,
                                host_port=9000),  # protocol=TCP
                ecs.PortMapping(container_port=5052,
                                host_port=5052),  # protocol=TCP
            ],
            secrets={
                # TODO: populate these with our keys
            },
            user='******')

        service = ecs_patterns.ApplicationLoadBalancedEc2Service(
            self,
            "Pirateship",
            #                certificate=???,  # TODO: set up the public domain
            cluster=cluster,
            desired_count=1,
            #                domain_name='ethpirates.com',
            #                domain_zone=???,  # TODO: set up the public domain
            public_load_balancer=True,
            task_definition=task_definition)
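
One thing worth noting in the example above: the shared docker volume 'v' is declared on the task definition but never mounted into the container, so nothing actually persists. Continuing the example, one plausible wiring (the mount path is an assumption, Lighthouse's default data directory):

        # Hypothetical: mount the shared 'v' volume at the beacon node's datadir
        container.add_mount_points(
            ecs.MountPoint(container_path='/root/.lighthouse',
                           source_volume='v',
                           read_only=False))
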
Example No. 22
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 identifier: str, user_arn: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        bucket = aws_s3.Bucket(
            self,
            id=f"flow-storage-bucket-{identifier}",
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        cache_bucket = aws_s3.Bucket(
            self,
            id=f"flow-cache-bucket-{identifier}",
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        vpc = aws_ec2.Vpc(
            self,
            id=f"bakery-vpc-{identifier}",
            cidr="10.0.0.0/16",
            enable_dns_hostnames=True,
            enable_dns_support=True,
            nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    name="PublicSubnet1",
                    subnet_type=aws_ec2.SubnetType.PUBLIC)
            ],
            max_azs=3,
        )
        security_group = aws_ec2.SecurityGroup(
            self,
            id=f"security-group-{identifier}",
            vpc=vpc,
        )
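        # 8786/8787 are the default Dask scheduler and dashboard ports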
        security_group.add_ingress_rule(aws_ec2.Peer.any_ipv4(),
                                        aws_ec2.Port.tcp_range(8786, 8787))
        security_group.add_ingress_rule(aws_ec2.Peer.any_ipv6(),
                                        aws_ec2.Port.tcp_range(8786, 8787))
        security_group.add_ingress_rule(security_group, aws_ec2.Port.all_tcp())
        cluster = aws_ecs.Cluster(
            self,
            id=f"bakery-cluster-{identifier}",
            vpc=vpc,
        )

        ecs_task_role = aws_iam.Role(
            self,
            id=f"prefect-ecs-task-role-{identifier}",
            assumed_by=aws_iam.ServicePrincipal(
                service="ecs-tasks.amazonaws.com"),
        )
        ecs_task_role.add_to_policy(
            aws_iam.PolicyStatement(
                resources=["*"],
                actions=[
                    "iam:ListRoleTags",
                ],
            ))
        ecs_task_role.add_to_policy(
            aws_iam.PolicyStatement(
                resources=[
                    f"arn:aws:logs:{self.region}:{self.account}:log-group:dask-ecs*"
                ],
                actions=[
                    "logs:GetLogEvents",
                ],
            ))
        bucket.grant_read_write(ecs_task_role)
        cache_bucket.grant_read_write(ecs_task_role)

        bucket_user = aws_iam.User.from_user_arn(
            self,
            id=f"prefect-bucket-user-{identifier}",
            user_arn=user_arn,
        )

        cache_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:*Object"],
                resources=[f"{cache_bucket.bucket_arn}/*"],
                principals=[bucket_user],
            ))

        cache_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=[cache_bucket.bucket_arn],
                principals=[bucket_user],
            ))

        bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:*Object"],
                resources=[f"{bucket.bucket_arn}/*"],
                principals=[bucket_user],
            ))

        bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=[bucket.bucket_arn],
                principals=[bucket_user],
            ))

        ecs_task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonECS_FullAccess"))

        prefect_ecs_agent_task_definition = aws_ecs.FargateTaskDefinition(
            self,
            id=f"prefect-ecs-agent-task-definition-{identifier}",
            cpu=512,
            memory_limit_mib=2048,
            task_role=ecs_task_role,
        )

        runner_token_secret = aws_ecs.Secret.from_secrets_manager(
            secret=aws_secretsmanager.Secret.from_secret_arn(
                self,
                id=f"prefect-cloud-runner-token-{identifier}",
                secret_arn=os.environ["RUNNER_TOKEN_SECRET_ARN"],
            ),
            field="RUNNER_TOKEN",
        )

        prefect_ecs_agent_task_definition.add_container(
            id=f"prefect-ecs-agent-task-container-{identifier}",
            image=aws_ecs.ContainerImage.from_registry(
                os.environ["BAKERY_IMAGE"]),
            port_mappings=[
                aws_ecs.PortMapping(container_port=8080, host_port=8080)
            ],
            logging=aws_ecs.LogDriver.aws_logs(stream_prefix="ecs-agent"),
            environment={
                "PREFECT__CLOUD__AGENT__LABELS":
                os.environ["PREFECT__CLOUD__AGENT__LABELS"]
            },
            secrets={"PREFECT__CLOUD__AGENT__AUTH_TOKEN": runner_token_secret},
            command=[
                "prefect",
                "agent",
                "ecs",
                "start",
                "--agent-address",
                "http://:8080",
                "--cluster",
                cluster.cluster_arn,
                "--task-role-arn",
                ecs_task_role.role_arn,
            ],
        )

        prefect_ecs_agent_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            id=f"prefect-ecs-agent-service-{identifier}",
            assign_public_ip=True,
            platform_version=aws_ecs.FargatePlatformVersion.LATEST,
            desired_count=1,
            task_definition=prefect_ecs_agent_task_definition,
            cluster=cluster,
            propagate_tags=aws_ecs.PropagatedTagSource.SERVICE,
        )

        prefect_ecs_agent_service.target_group.configure_health_check(
            path="/api/health", port="8080")

        ecs_task_execution_role = aws_iam.Role(
            self,
            id=f"prefect-ecs-task-execution-role-{identifier}",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy"),
            ],
        )

        core.CfnOutput(
            self,
            id=f"prefect-task-role-arn-output-{identifier}",
            export_name=f"prefect-task-role-arn-output-{identifier}",
            value=ecs_task_role.role_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-cluster-arn-output-{identifier}",
            export_name=f"prefect-cluster-arn-output-{identifier}",
            value=cluster.cluster_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-storage-bucket-name-output-{identifier}",
            export_name=f"prefect-storage-bucket-name-output-{identifier}",
            value=bucket.bucket_name,
        )

        core.CfnOutput(
            self,
            id=f"prefect-cache-bucket-name-output-{identifier}",
            export_name=f"prefect-cache-bucket-name-output-{identifier}",
            value=cache_bucket.bucket_name,
        )

        core.CfnOutput(
            self,
            id=f"prefect-task-execution-role-arn-output-{identifier}",
            export_name=f"prefect-task-execution-role-arn-output-{identifier}",
            value=ecs_task_execution_role.role_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-security-group-output-{identifier}",
            export_name=f"prefect-security-group-output-{identifier}",
            value=security_group.security_group_id,
        )

        core.CfnOutput(
            self,
            id=f"prefect-vpc-output-{identifier}",
            export_name=f"prefect-vpc-output-{identifier}",
            value=vpc.vpc_id,
        )
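
Every output above carries an export_name, so other stacks can consume the values as CloudFormation exports. A minimal sketch of a consumer (the stack itself is hypothetical):

# Hypothetical consumer stack importing the exports defined above.
from aws_cdk import core


class BakeryConsumerStack(core.Stack):
    def __init__(self, scope: core.Construct, construct_id: str,
                 identifier: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        cluster_arn = core.Fn.import_value(
            f"prefect-cluster-arn-output-{identifier}")
        task_role_arn = core.Fn.import_value(
            f"prefect-task-role-arn-output-{identifier}")
        # ...use the imported values, e.g. in a Prefect ECS run config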
Example No. 23
0
    def __init__(self, scope: core.Construct, id: str, region, domain,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC, we need one for the ECS cluster (sadly)
        vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)

        cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)

        # Route53 & SSL Certificate
        zone = dns.HostedZone(self, "dns", zone_name=domain)

        dns.ARecord(self,
                    'MinecraftRecord',
                    zone=zone,
                    record_name='minecraft',
                    # placeholder IP; the starter Lambda updates this record at runtime
                    target=dns.RecordTarget(values=['1.2.3.4']))

        cert = acm.Certificate(
            self,
            'cert',
            domain_name=f'*.{domain}',
            validation=acm.CertificateValidation.from_dns(zone))

        # ECS ( Cluster, EFS, Task Def)
        fs = efs.FileSystem(self,
                            'EFS',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        task_definition = ecs.FargateTaskDefinition(self,
                                                    'TaskDef',
                                                    memory_limit_mib=4096,
                                                    cpu=1024)

        container = task_definition.add_container(
            'MinecraftDocker',
            image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
            logging=ecs.AwsLogDriver(stream_prefix='Minecraft'),
            cpu=1024,
            memory_limit_mib=4096)
        container.add_mount_points(
            ecs.MountPoint(container_path='/minecraft',
                           source_volume='efs',
                           read_only=False))
        cfn_task = container.task_definition.node.default_child
        cfn_task.add_property_override("Volumes", [{
            "EFSVolumeConfiguration": {
                "FilesystemId": fs.file_system_id
            },
            "Name": "efs"
        }])

        container.add_port_mappings(ecs.PortMapping(container_port=25565))

        sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25565),
                            description='Minecraft Access')
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25575),
                            description='RCON Access')

        fs.connections.allow_default_port_from(sg)

        subnets = ",".join(vpc.select_subnets().subnet_ids)

        # Cognito ( For ApiGW Authentication)
        userpool = cognito.UserPool(
            self,
            'UserPool',
            user_invitation=cognito.UserInvitationConfig(
                email_body=
                """Hi {username}, you have been invited to our Minecraft server!
                Your temporary password is {####}
                """,
                email_subject="Minecraft invitation"))

        # APIGW (Gateway, Lambdas, S3 Static content)

        # Lambda Starter
        starter = _lambda.Function(self,
                                   'Starter',
                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                   handler='index.lambda_handler',
                                   code=_lambda.Code.asset('lambda/starter'),
                                   timeout=core.Duration.seconds(300),
                                   environment={
                                       'cluster': cluster.cluster_name,
                                       'subnets': subnets,
                                       'security_groups': sg.security_group_id,
                                       'task_definition':
                                       task_definition.task_definition_arn,
                                       'region': region,
                                       'zone_id': zone.hosted_zone_id,
                                       'domain': domain
                                   })

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "ecs:ListTasks", "ecs:DescribeTasks",
                                    "ec2:DescribeNetworkInterfaces"
                                ]))
        starter.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[task_definition.task_definition_arn],
                actions=["ecs:RunTask", "ecs:DescribeTasks"]))
        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    task_definition.task_role.role_arn,
                                    task_definition.execution_role.role_arn
                                ],
                                actions=["iam:PassRole"]))

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[zone.hosted_zone_arn],
                                actions=["route53:ChangeResourceRecordSets"]))

        # S3 static webpage
        bucket = s3.Bucket(self,
                           "S3WWW",
                           public_read_access=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           website_index_document="index.html")
        s3d.BucketDeployment(self,
                             "S3Deploy",
                             destination_bucket=bucket,
                             sources=[s3d.Source.asset("static_page")])

        status = _lambda.Function(self,
                                  'Status',
                                  runtime=_lambda.Runtime.PYTHON_3_8,
                                  handler='index.lambda_handler',
                                  code=_lambda.Code.asset('lambda/status'),
                                  environment={
                                      'url': f"https://minecrafter.{domain}",
                                      'domain': domain
                                  })

        # ApiGW
        apigw = api.LambdaRestApi(self,
                                  'ApiGW',
                                  handler=status,
                                  proxy=False,
                                  domain_name={
                                      "domain_name": f'minecrafter.{domain}',
                                      "certificate": cert
                                  },
                                  default_cors_preflight_options={
                                      "allow_origins": api.Cors.ALL_ORIGINS,
                                      "allow_methods": api.Cors.ALL_METHODS
                                  })

        start = apigw.root.add_resource('start')
        start.add_method('ANY', integration=api.LambdaIntegration(starter))

        apigw.root.add_method('ANY')

        dns.ARecord(self,
                    'PointDNSToApiGW',
                    zone=zone,
                    target=dns.RecordTarget.from_alias(
                        targets.ApiGateway(apigw)),
                    record_name=f"minecrafter.{domain}")
Example No. 24
0
    def __init__(self, scope, id, vpc, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # cluster creation
        cluster = aws_ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # service discovery creation
        sd_namespace = cluster.add_default_cloud_map_namespace(
            name="svc.test.local", vpc=vpc)
        aws_servicediscovery.Service(self,
                                     "svc.test.local",
                                     namespace=sd_namespace,
                                     load_balancer=True)

        # ECS role creation
        ecs_principle = aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        execution_role = aws_iam.Role(self,
                                      'execution-role',
                                      assumed_by=ecs_principle)
        execution_role.add_managed_policy(
            policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSCodeDeployRoleForECS"))
        execution_role.add_managed_policy(
            policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ContainerRegistryReadOnly"))
        task_role = aws_iam.Role(self, 'task-role', assumed_by=ecs_principle)
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSAppMeshEnvoyAccess"))
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="CloudWatchFullAccess"))
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSXRayDaemonWriteAccess"))

        # envoy ecr object
        envoy_ecr = aws_ecr.Repository.from_repository_attributes(
            self,
            'aws-envoy',
            repository_arn=core.Stack.of(self).format_arn(
                service="ecr",
                resource="aws-appmesh-envoy",
                account="840364872350"),
            repository_name="aws-appmesh-envoy")

        # colorteller image builds
        gateway_image = aws_ecs.ContainerImage.from_asset("./src/gateway")
        colorteller_image = aws_ecs.ContainerImage.from_asset(
            "./src/colorteller")

        # logging setup
        log_group = aws_logs.LogGroup(self,
                                      "/ecs/colorteller",
                                      retention=aws_logs.RetentionDays.ONE_DAY)
        gateway_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                      stream_prefix="gateway")
        black_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                    stream_prefix="black")
        blue_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                   stream_prefix="blue")
        red_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="red")
        white_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                    stream_prefix="white")
        tcpecho_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                      stream_prefix="tcpecho")

        # Mesh properties setup
        mesh_properties = aws_ecs.AppMeshProxyConfigurationProps(
            app_ports=[9080],
            proxy_egress_port=15001,
            proxy_ingress_port=15000,
            egress_ignored_i_ps=["169.254.170.2", "169.254.169.254"],
            ignored_uid=1337)

        # envoy ulimit defaults
        envoy_ulimit = aws_ecs.Ulimit(hard_limit=15000,
                                      name=aws_ecs.UlimitName.NOFILE,
                                      soft_limit=15000)

        # fargate task def - requires envoy proxy container, gateway app and x-ray
        gateway_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "gateway_task",
            cpu=256,
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        gateway_task_def.add_container("gateway",
                                       logging=gateway_ecs_logs,
                                       environment={
                                           "SERVER_PORT":
                                           "9080",
                                           "STAGE":
                                           "v1.1",
                                           "COLOR_TELLER_ENDPOINT":
                                           "colorteller.svc.test.local:9080",
                                           "TCP_ECHO_ENDPOINT":
                                           "tcpecho.svc.test.local:2701"
                                       },
                                       image=gateway_image).add_port_mappings(
                                           aws_ecs.PortMapping(
                                               container_port=9080,
                                               protocol=aws_ecs.Protocol.TCP))
        gateway_task_def.add_container(
            "xray",
            logging=gateway_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))
        gateway_envoy_container = gateway_task_def.add_container(
            "envoy",
            logging=gateway_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "debug",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/gateway",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        gateway_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        gateway_envoy_container.add_ulimits(envoy_ulimit)

        # black task def - requires color app, envoy and x-ray containers
        black_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "black-task",
            cpu=256,
            family="black",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        black_envoy_container = black_task_def.add_container(
            "envoy",
            logging=black_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/black",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        black_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        black_envoy_container.add_ulimits(envoy_ulimit)
        black_app_container = black_task_def.add_container(
            "black",
            logging=black_ecs_logs,
            environment={
                "COLOR": "black",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        black_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        black_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=black_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        black_task_def.add_container(
            "xray",
            logging=black_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # blue task def (same as black)
        blue_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "blue-task",
            cpu=256,
            family="blue",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        blue_envoy_container = blue_task_def.add_container(
            "envoy",
            logging=blue_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/blue",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        blue_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        blue_envoy_container.add_ulimits(envoy_ulimit)
        blue_app_container = blue_task_def.add_container(
            "blue",
            logging=blue_ecs_logs,
            environment={
                "COLOR": "black",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        blue_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        blue_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=blue_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        blue_task_def.add_container(
            "xray",
            logging=blue_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # red task def (same as black)
        red_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "red-task",
            cpu=256,
            family="red-task",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        red_envoy_container = red_task_def.add_container(
            "envoy",
            logging=red_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/red",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        red_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        red_envoy_container.add_ulimits(envoy_ulimit)
        red_app_container = red_task_def.add_container("red",
                                                       logging=red_ecs_logs,
                                                       environment={
                                                           "COLOR": "red",
                                                           "SERVER_PORT":
                                                           "9080",
                                                           "STAGE": "v1.2"
                                                       },
                                                       image=colorteller_image)
        red_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        red_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=red_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        red_task_def.add_container(
            "xray",
            logging=red_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # white task def (same as black) - colorteller.svc.test.local points to this service (because containers need something to resolve to or they fail)
        white_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "white-task",
            cpu=256,
            family="white",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        white_envoy_container = white_task_def.add_container(
            "envoy",
            logging=white_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/white",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        white_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        white_envoy_container.add_ulimits(envoy_ulimit)
        white_app_container = white_task_def.add_container(
            "white",
            logging=white_ecs_logs,
            environment={
                "COLOR": "white",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        white_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        white_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=white_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        white_task_def.add_container(
            "xray",
            logging=white_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # tcpecho service (external docker image)
        tcpecho_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'tcpecho-tasks',
            cpu=256,
            family="tcpecho",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role)
        tcpecho_task_def.add_container(
            "tcpecho",
            logging=tcpecho_ecs_logs,
            environment={
                "TCP_PORT": "2701",
                "NODE_NAME": "mesh/ColorTellerAppMesh/virtualNode/echo"
            },
            image=aws_ecs.ContainerImage.from_registry("cjimti/go-echo"),
            essential=True,
        ).add_port_mappings(
            aws_ecs.PortMapping(container_port=2701,
                                protocol=aws_ecs.Protocol.TCP))

        # adds task defs to fargate services - adds security group access to local vpc cidr block
        # all the services are treated the same way
        gateway_fargate_service = aws_ecs.FargateService(
            self,
            "gateway",
            cluster=cluster,
            task_definition=gateway_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="gateway"))
        gateway_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        black_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "black",
            cluster=cluster,
            task_definition=black_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="black"))
        black_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        blue_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "blue",
            cluster=cluster,
            task_definition=blue_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="blue"))
        blue_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        red_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "red",
            cluster=cluster,
            task_definition=red_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="red"))
        red_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        white_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "white",
            cluster=cluster,
            task_definition=white_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="colorteller"))
        white_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        echo_fargate_service = aws_ecs.FargateService(
            self,
            "tcpecho",
            cluster=cluster,
            task_definition=tcpecho_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="tcpecho"))
        echo_fargate_service.connections.security_groups[0].add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=aws_ec2.Port.tcp(2701),
            description="Allow tcp-echo inbound from VPC")

        # adds autoscaling policies to all services
        for service in [
                black_colorteller_fargate_service,
                blue_colorteller_fargate_service,
                red_colorteller_fargate_service,
                white_colorteller_fargate_service, gateway_fargate_service,
                echo_fargate_service
        ]:
            # every service here is a plain FargateService, so it exposes
            # auto_scale_task_count directly
            scaling = service.auto_scale_task_count(max_capacity=2)
            scaling.scale_on_cpu_utilization(
                "CpuScaling",
                target_utilization_percent=50,
                scale_in_cooldown=core.Duration.seconds(60),
                scale_out_cooldown=core.Duration.seconds(60),
            )

        # configure loadbalancer to listen on port 80 and add targets to gateway and echo apps
        load_balancer = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
            self, "lb", vpc=vpc, internet_facing=True)
        listener = load_balancer.add_listener("PublicListener",
                                              port=80,
                                              open=True)

        health_check = aws_elasticloadbalancingv2.HealthCheck(
            interval=core.Duration.seconds(60),
            path="/ping",
            port="9080",
            timeout=core.Duration.seconds(5))

        # attach ALB to ECS service
        listener.add_targets(
            "gateway",
            port=80,
            targets=[gateway_fargate_service, echo_fargate_service],
            health_check=health_check,
        )

        # outputs of ALB and cluster
        core.CfnOutput(self,
                       "LoadBalancerDNS",
                       value=load_balancer.load_balancer_dns_name)
        core.CfnOutput(self, "ClusterName", value=cluster.cluster_name)
Example No. 25
0
# Assumed boilerplate for this snippet: an app, a stack, a VPC and a cluster
# with EC2 capacity for the tasks to run on.
app = core.App()
stack = core.Stack(app, "aws-ecs-integ")

vpc = ec2.Vpc(stack, "Vpc", max_azs=2)
cluster = ecs.Cluster(stack, "awsvpc-ecs-demo-cluster", vpc=vpc)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.micro"))

# Create a task definition with its own elastic network interface
task_definition = ecs.Ec2TaskDefinition(
    stack,
    "nginx-awsvpc",
    network_mode=ecs.NetworkMode.AWS_VPC,
)

web_container = task_definition.add_container(
    "nginx",
    image=ecs.ContainerImage.from_registry("nginx:latest"),
    cpu=100,
    memory_limit_mib=256,
    essential=True)
port_mapping = ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP)
web_container.add_port_mappings(port_mapping)

# Create a security group that allows HTTP traffic on port 80 for our
# containers without modifying the security group on the instance
security_group = ec2.SecurityGroup(stack,
                                   "nginx--7623",
                                   vpc=vpc,
                                   allow_all_outbound=False)
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

# Create the service
service = ecs.Ec2Service(stack,
                         "awsvpc-ecs-demo-service",
                         cluster=cluster,
                         task_definition=task_definition,
                         security_groups=[security_group])
Example No. 26
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ******* Database table
        audiobooksDB = aws_dynamodb.Table(
            self,
            "audiobooksDB",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING),
            read_capacity=2,
            write_capacity=2,
            billing_mode=aws_dynamodb.BillingMode.PROVISIONED)

        # ******* Lambda functions
        book_upload_lambda_function = aws_lambda.Function(
            self,
            "HandleBookUploadLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_book_upload'))
        polly_audio_lambda_function = aws_lambda.Function(
            self,
            "HandlePollyAudioLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_polly_audio'),
            timeout=core.Duration.seconds(120))

        # ******* S3 upload buckets
        BookUploadBucket = aws_s3.Bucket(self, "BookUploadBucket")
        AudioUploadBucket = aws_s3.Bucket(self, "AudioUploadBucket")
        VideoUploadBucket = aws_s3.Bucket(self, "VideoUploadBucket")
        ImageUploadBucket = aws_s3.Bucket(self, "ImageUploadBucket")

        # ******* Create S3 event source
        book_upload_lambda_function.add_event_source(
            S3EventSource(BookUploadBucket,
                          events=[aws_s3.EventType.OBJECT_CREATED],
                          filters=[{
                              "suffix": '.txt'
                          }]))
        # ******* Create SNS topic
        PollySNSTopic = aws_sns.Topic(self, "PollySNSTopic")
        PollySNSTopic.add_subscription(
            aws_sns_subscriptions.LambdaSubscription(
                polly_audio_lambda_function))

        # ******* Book function environment variables
        book_upload_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        book_upload_lambda_function.add_environment(
            "AUDIO_S3_BUCKET", AudioUploadBucket.bucket_name)
        book_upload_lambda_function.add_environment("SNS_TOPIC",
                                                    PollySNSTopic.topic_arn)

        # ******* Book function permissions
        audiobooksDB.grant_write_data(book_upload_lambda_function)
        BookUploadBucket.grant_read(book_upload_lambda_function)
        AudioUploadBucket.grant_write(book_upload_lambda_function)
        PollySNSTopic.grant_publish(book_upload_lambda_function)
        book_upload_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["polly:*"], resources=["*"]))

        # ******* Fargate container permissions
        role = aws_iam.Role(
            self,
            "FargateContainerRole",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:PutObject"],
                resources=[VideoUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[AudioUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[ImageUploadBucket.bucket_arn + "/*"]))

        # ******* Fargate container
        vpc = aws_ec2.Vpc(self, "CdkFargateVpc", max_azs=2)
        cluster = aws_ecs.Cluster(self, 'FargateCluster', vpc=vpc)
        image = aws_ecs.ContainerImage.from_asset(
            "../Functions/ECSContainerFiles")
        task_definition = aws_ecs.FargateTaskDefinition(
            self,
            "FargateContainerTaskDefinition",
            execution_role=role,
            task_role=role,
            cpu=1024,
            memory_limit_mib=3072)

        port_mapping = aws_ecs.PortMapping(container_port=80, host_port=80)
        container = task_definition.add_container(
            "Container",
            image=image,
            logging=aws_ecs.AwsLogDriver(
                stream_prefix="videoProcessingContainer"))
        container.add_port_mappings(port_mapping)

        # ******* Audio function environment variables
        polly_audio_lambda_function.add_environment(
            "VIDEO_S3_BUCKET", VideoUploadBucket.bucket_name)
        polly_audio_lambda_function.add_environment(
            "TASK_DEFINITION_ARN", task_definition.task_definition_arn)
        polly_audio_lambda_function.add_environment("CLUSTER_ARN",
                                                    cluster.cluster_arn)
        polly_audio_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        polly_audio_lambda_function.add_environment("CONTAINER_NAME",
                                                    container.container_name)
        polly_audio_lambda_function.add_environment("VPC_ID", str(vpc.vpc_id))

        # ******* Audio function permissions
        audiobooksDB.grant_read_write_data(polly_audio_lambda_function)
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ecs:RunTask"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["iam:PassRole"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ec2:DescribeSubnets"],
                                    resources=["*"]))
Example No. 27
0
    def __init__(self, scope: core.Construct, construct_id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
        #Access points allow multiple WordPress file systems to live on the same EFS volume.
        #The more data on an EFS volume, the better it will perform.
        #This provides a high level of security while also optimizing performance.
        AccessPoint = props['file_system'].add_access_point(
            "local-access-point",
            path=f"/{props['IdentifierName']}",
            create_acl=efs.Acl(
                owner_uid="100",  #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                owner_gid="101",
                permissions="0755"))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=props['vpc'],
            container_insights=props['ecs_enable_container_insights'])

        #Get needed secrets
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ssm/StringParameter.html?highlight=from_secure_string_parameter_attributes#aws_cdk.aws_ssm.StringParameter.from_secure_string_parameter_attributes
        # ParameterStoreTest = ssm.StringParameter.from_secure_string_parameter_attributes( self, "ParameterStoreTest",
        #     parameter_name="", #Remember: the task execution role needs KMS permissions for the Parameter Store key!
        #     version=1
        # )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Secret.html
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/SecretStringGenerator.html
        dbtest = {
            "database_name": '',
            "username": '',
            "host": str(props["rds_instance"].cluster_endpoint.hostname)
        }
        WordpressDbConnectionSecret = secretsmanager.Secret(
            self,
            "WordpressDbConnectionSecret",
            generate_secret_string=secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps(dbtest),
                generate_string_key="password",
                exclude_characters='/"'))
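        #The generated "password" key is merged into the JSON template above, so host,
        #username, database_name and password can all be read from this single secret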

        #TODO: Lambda call to populate secrets but only

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Volume.html#aws_cdk.aws_ecs.Volume
        WordpressEfsVolume = ecs.Volume(
            name="efs",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=props['file_system'].file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=AccessPoint.access_point_id)))

        #Create Task Definition
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
        WordpressTask = ecs.FargateTaskDefinition(
            self,
            "TaskDefinition",
            cpu=props['ecs_cpu_size'],
            memory_limit_mib=props['ecs_memory_size'],
            volumes=[WordpressEfsVolume])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
        WordpressContainer = WordpressTask.add_container(
            "Wordpress",
            image=ecs.ContainerImage.from_ecr_repository(
                repository=ecr.Repository.from_repository_name(
                    self,
                    "wpimage",
                    repository_name=props['ecs_container_repo_name']),
                tag=props['ecs_container_tag']),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="container",
                #log_group = "{props['environment']}/{props['unit']}/{props['application']}", #ToDo make sure I like log group name
                log_retention=logs.RetentionDays(
                    props['ecs_log_retention_period'])),
            environment={
                "TROUBLESHOOTING_MODE_ENABLED":
                props['TROUBLESHOOTING_MODE_ENABLED']
            },
            secrets={
                # "PARAMETERSTORETEST": ecs.Secret.from_ssm_parameter( ParameterStoreTest ),
                "DBHOST":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "host"),
                "DBUSER":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "username"),
                "DBUSERPASS":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "password"),
                "DBNAME":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "database_name")
            },
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
        WordpressContainer.add_port_mappings(
            ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
        #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
        WordpressContainer.add_mount_points(
            ecs.MountPoint(container_path=props['ecs_container_efs_path'],
                           read_only=False,
                           source_volume=WordpressEfsVolume.name))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedFargateService.html
        EcsService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "EcsService",
            cluster=cluster,
            desired_count=props['ecs_container_desired_count'],
            task_definition=WordpressTask,
            enable_ecs_managed_tags=True,
            public_load_balancer=True,
            domain_name=props['domain_name'],
            domain_zone=route53.HostedZone.from_hosted_zone_attributes(
                self,
                "hostedZone",
                hosted_zone_id=props['domain_zone'],
                zone_name=props['zone_name']),
            listener_port=443,
            redirect_http=True,
            protocol=elasticloadbalancingv2.ApplicationProtocol.HTTPS,
            target_protocol=elasticloadbalancingv2.ApplicationProtocol.HTTP,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,  #Required for EFS
            security_groups=[
                ec2.SecurityGroup.from_security_group_id(
                    self,
                    "EcsToRdsSeurityGroup",
                    security_group_id=props["EcsToRdsSeurityGroup"].security_group_id)
            ],
        )

        #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Connections.html#aws_cdk.aws_ec2.Connections
        EcsService.service.connections.allow_to(
            props['file_system'],
            ec2.Port.tcp(2049))  #Open hole to ECS in EFS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationTargetGroup.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup.set_attribute
        EcsService.target_group.set_attribute(
            key="load_balancing.algorithm.type",
            value="least_outstanding_requests")
        EcsService.target_group.set_attribute(
            key="deregistration_delay.timeout_seconds", value="30")
        EcsService.target_group.configure_health_check(
            healthy_threshold_count=5,  #2-10
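            #timeout must stay below the health check interval (default 30s)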
            timeout=core.Duration.seconds(29),
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html#aws_cdk.aws_ecs.FargateService.auto_scale_task_count
        ECSAutoScaler = EcsService.service.auto_scale_task_count(
            max_capacity=props['ecs_container_max_count'],
            min_capacity=props['ecs_container_min_count'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ScalableTaskCount.html#aws_cdk.aws_ecs.ScalableTaskCount
        ECSAutoScaler.scale_on_cpu_utilization(
            "cpuScale",
            target_utilization_percent=80,
            scale_out_cooldown=core.Duration.seconds(30),
            scale_in_cooldown=core.Duration.seconds(60))
        ECSAutoScaler.scale_on_memory_utilization(
            "memScale",
            target_utilization_percent=80,
            scale_out_cooldown=core.Duration.seconds(30),
            scale_in_cooldown=core.Duration.seconds(60))
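
Everything above is driven by the props dict handed to this construct. A hypothetical,
abridged wiring from the app entry point is sketched below; every key is inferred from
its usage above, and the stack/variable names and all values are placeholders:

# Hypothetical app.py wiring for the WordPress construct above
props = {
    "IdentifierName": "site1",                    # EFS access point path per site
    "vpc": network_stack.vpc,
    "file_system": storage_stack.file_system,
    "rds_instance": db_stack.rds_instance,
    "EcsToRdsSeurityGroup": db_stack.ecs_to_rds_sg,
    "ecs_enable_container_insights": True,
    "ecs_cpu_size": 512,
    "ecs_memory_size": 1024,
    "ecs_container_repo_name": "wordpress",
    "ecs_container_tag": "latest",
    "ecs_log_retention_period": "ONE_MONTH",      # a logs.RetentionDays member name
    "ecs_container_efs_path": "/var/www/html",
    "ecs_container_desired_count": 2,
    "ecs_container_min_count": 1,
    "ecs_container_max_count": 4,
    "TROUBLESHOOTING_MODE_ENABLED": "false",      # container env vars must be strings
    "domain_name": "blog.example.com",
    "domain_zone": "Z0123456789ABCDEF",           # hosted zone id placeholder
    "zone_name": "example.com",
}
WordpressFargateStack(app, "wordpress", props)
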
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        #############################################
        #Import resource and custom setting part start
        #############################################
        #cn-north-1
        impRes={
                 "vpc":"vpc-0883083ff3a10c1ec",
                 "SvcSG":"sg-04d3b60e954c1c1ef",
                 "ALBSG":"sg-0b6d093d52d48bba9",
                 "ALBInternet":True,
                 "taskRole":"arn:aws-cn:iam::627484392488:role/ecsTaskExecutionRole",
                 "AlbSubnet":[
                       {"subnetId":"subnet-0d16fa0c969f234d3",
                        "routeTabId":"rtb-074c6b532f3030ad6"},
                       {"subnetId":"subnet-0f28a97c04d3b11cd",
                        "routeTabId":"rtb-074c6b532f3030ad6"}
                 ],
                 #"SvcSubNet":[{"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-074c6b532f3030ad6"}]
                 "SvcSubNet":[{"subnetId":"subnet-0f28a97c04d3b11cd","routeTabId":"rtb-0587cc522717461cd"},
                              {"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-0587cc522717461cd"}]
               }
        newRes={
                 "TG":{"HealthPath":"/test.html","Port":80,"containPort":80},
                 "Listener":{"Port":80},
                 "TaskFamily":"tsFargate",
                 "ImageAsset1":{"DockfilePath":"httpd-ssh",
                                "BuildArgs":{"HTTP_PROXY":"http://YOUR_PROXY_SERVER:80"}
                               }
               }

        MyTaskDefinition=[{"Cpu":512,"MemLimitMib":1024}]
        MyContainerDefinition=[
             {"containerName":"MyContainer1",
              "cpu":256,
              "essential":True,
              "portMappings":[ecs.PortMapping(container_port=80,host_port=80)], #"portMappings":[ecs.PortMapping(container_port=80,host_port=80),ecs.PortMapping(container_port=22,host_port=22)],
              "environment":{"SSH_PUBLIC_KEY":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/alWrS+HH5KkPbso+Tsy+Z0WGTX5wvXvon5OacLMyOU3gj2mbbIifasXf/RadpuywuyW3uFirtRlPmSb5Q0PVLODku503Xettw+u6/Z22VV7F2ACgg4iHaCo2SR4L8saUrLLfcKXKr/WCn3w7uYcqGsXEcSFCCSZgn4BoZJqP4Q=="},
              "LogMountPoint":["/usr/local/apache2/logs"]
             }
        ]
        MySvc={"AssignPubIp":True, "desiredCount":1}
        #############################################
        #Import resource and custom setting part end
        #############################################
		
        #if you import an external resource you cannot set a destroy policy on it
        #import VPC, Private Subnet, SG
        vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=impRes["vpc"])	
        
        #import SG
        mysvcsg = ec2.SecurityGroup.from_security_group_id(self, "svcsg", 
                impRes["SvcSG"],
                mutable=False)
        
        #import Role
        taskRole = iam.Role.from_role_arn(self, "TaskRole",impRes["taskRole"])
        
        #create ALB        
        mytargetGrp = elbv2.ApplicationTargetGroup(self, "targetGrp",
                target_type=elbv2.TargetType.IP,
                port=newRes["TG"]["Port"],
                vpc=vpc,
                health_check=elbv2.HealthCheck(path=newRes["TG"]["HealthPath"]))
        #target group cannot use .apply_removal_policy directly
        cfn_mytargetGrp=mytargetGrp.node.find_child("Resource")
        cfn_mytargetGrp.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
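        #node.find_child("Resource") returns the underlying CfnTargetGroup (L1 resource),
        #which does support apply_removal_policy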
		
        #import public subnet for alb
        albsubnets = [
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid1',
                    subnet_id = impRes["AlbSubnet"][0]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][0]["routeTabId"]
                ),
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid2',
                    subnet_id = impRes["AlbSubnet"][1]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][1]["routeTabId"]
                )
        ]		
        vpc_subnets_selection = ec2.SubnetSelection(subnets=albsubnets)
        #create new ALB
        myalb = elbv2.ApplicationLoadBalancer(self, "ALBv2",
                vpc=vpc,
                security_group=ec2.SecurityGroup.from_security_group_id(self, "ALBSG", impRes["ALBSG"],mutable=False),
                internet_facing=impRes["ALBInternet"],
                vpc_subnets=vpc_subnets_selection)
        myalb.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        #create new ALB listener
        myalblistener = elbv2.ApplicationListener(self, "ALBlistenter", 
                load_balancer=myalb, 
                port=newRes["Listener"]["Port"])
        myalblistener.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        myalblistener.add_target_groups("albaddtg", target_groups=[mytargetGrp])
        
        
        #create new ECS Cluster
        mycluster = ecs.Cluster(self, "cluster", vpc=vpc)
        mycluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        
        fargatetaskDefinition = ecs.FargateTaskDefinition(self, "fargatetaskDefinition",
                cpu=MyTaskDefinition[0]["Cpu"],
                memory_limit_mib=MyTaskDefinition[0]["MemLimitMib"],
                execution_role=taskRole,
                family=newRes["TaskFamily"],
                task_role=taskRole)
                #volumes=myEfsVols)      
        fargatetaskDefinition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)


        #define docker image asset
        dirname = os.path.dirname(__file__)
        #for container 1, normally httpd
        #create image asset: the image is built locally, then pushed to ECR
        asset1 = DockerImageAsset(self, "ImageAsset1",
                   directory=os.path.join(dirname, "../..", newRes["ImageAsset1"]["DockfilePath"]),
                   build_args=newRes["ImageAsset1"]["BuildArgs"]
                )
         
        #create container definition for task definition
        MyContainer1def = ecs.ContainerDefinition(self, "MyContainer1def",
                task_definition=fargatetaskDefinition,
                linux_parameters=ecs.LinuxParameters(self,"LinuxPara1",init_process_enabled=True),
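                #the image tag is recovered from the asset's image URI (the part after the last ':')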
                image=ecs.ContainerImage.from_ecr_repository(asset1.repository, asset1.image_uri.rpartition(":")[-1]),
                container_name=MyContainerDefinition[0]["containerName"],
                essential=MyContainerDefinition[0]["essential"],
                port_mappings=MyContainerDefinition[0]["portMappings"],
                environment=MyContainerDefinition[0]["environment"]
        )
	
        #import service private subnet
        mysvcprivateSNs = [
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN1',
                    subnet_id = impRes["SvcSubNet"][0]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][0]["routeTabId"]),
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN2',
                    subnet_id = impRes["SvcSubNet"][1]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][1]["routeTabId"])
        ]

        #create service
        myservice=ecs.FargateService(self,"service",
                task_definition=fargatetaskDefinition,
                assign_public_ip=MySvc["AssignPubIp"],
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,
                vpc_subnets=ec2.SubnetSelection(subnets=mysvcprivateSNs),
                security_group=mysvcsg,
                cluster=mycluster,
                desired_count=MySvc["desiredCount"])
        
        mytargetGrp.add_target(myservice.load_balancer_target(
                container_name="MyContainer1",
                container_port=newRes["TG"]["containPort"],
                protocol=ecs.Protocol.TCP))
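
Because this stack imports live resources (Vpc.from_lookup, from_security_group_id,
from_subnet_attributes), it has to be synthesized against a concrete account and
region. A hypothetical app entry is sketched below; the stack class name is an
assumption:

import os

from aws_cdk import core as cdk

app = cdk.App()
# from_lookup requires an explicit env; the region matches the cn-north-1 IDs above
FargateAlbStack(app, "fargate-alb",
        env=cdk.Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"],
                            region="cn-north-1"))
app.synth()
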
Example No. 29
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pearson_vpn_connection = ec2.Peer.ipv4('159.182.0.0/16')

        # Props Setup
        stage = scope.node.try_get_context('stage')
        my_service_name = scope.node.try_get_context('serviceName')
        api_health_path = props['apiHealthPath']
        tca_health_path = props['tcaHealthPath']
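        # stage and serviceName come from CDK context, e.g. a "context" block in
        # cdk.json or (hypothetical values) cdk deploy -c stage=dev -c serviceName=oculus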

        # Setup IAM user for logs
        vpc_flow_role = iam.Role(
            self,
            'FlowLog',
            assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'))

        vpc_flow_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'iam:PassRole', 'logs:CreateLogGroup',
                                    'logs:DescribeLogGroups',
                                    'logs:CreateLogStream', 'logs:PutLogEvents'
                                ],
                                resources=["*"]))

        # Create Cloudwatch log group
        log_group = logs.LogGroup(self,
                                  'LogGroup',
                                  log_group_name="{0}-{1}".format(
                                      my_service_name, stage),
                                  retention=logs.RetentionDays.ONE_YEAR,
                                  removal_policy=core.RemovalPolicy.DESTROY)

        # Setup VPC resource
        vpc = ec2.Vpc(self,
                      '{0}-{1}-vpc'.format(my_service_name, stage),
                      cidr=props['cidr'],
                      max_azs=props['vpcAzCount'])

        # Setup VPC flow logs
        vpc_log = ec2.CfnFlowLog(
            self,
            'FlowLogs',
            resource_id=vpc.vpc_id,
            resource_type='VPC',
            traffic_type='ALL',
            deliver_logs_permission_arn=vpc_flow_role.role_arn,
            log_destination_type='cloud-watch-logs',
            log_group_name="{0}-{1}".format(log_group.log_group_name, stage))

        # Setup Security Group in VPC
        vpc_sg = ec2.SecurityGroup(self,
                                   'EcSSG',
                                   vpc=vpc,
                                   allow_all_outbound=None,
                                   description="Security Group for Oculus vpc",
                                   security_group_name="{0}-{1}-vpc-sg".format(
                                       my_service_name, stage))

        # Add Rules to Security Group
        vpc_sg.add_ingress_rule(peer=pearson_vpn_connection,
                                connection=ec2.Port.tcp(22))

        # ALB Security Group
        alb_sg = ec2.SecurityGroup(self,
                                   'AlbSG',
                                   vpc=vpc,
                                   allow_all_outbound=None,
                                   description="Security group for oculus ALB",
                                   security_group_name="{0}-{1}-alb-sg".format(
                                       my_service_name, stage))

        # Add HTTPS Rule to Security Group
        alb_sg.add_ingress_rule(peer=pearson_vpn_connection,
                                connection=ec2.Port.tcp(443))

        # Setup ALB
        alb = elbv2.ApplicationLoadBalancer(self,
                                            'ALB',
                                            vpc=vpc,
                                            internet_facing=True,
                                            security_group=alb_sg)

        # Setup API Target Group
        api_tg = elbv2.ApplicationTargetGroup(
            self,
            'ApiTargetGroup',
            port=8080,
            protocol=elbv2.ApplicationProtocol.HTTP,
            vpc=vpc)

        # Setup Web Target Group
        web_tg = elbv2.ApplicationTargetGroup(
            self,
            'WebTargetGroup',
            port=3030,
            protocol=elbv2.ApplicationProtocol.HTTP,
            vpc=vpc)

        # Setup TCA Target Group
        tca_tg = elbv2.ApplicationTargetGroup(
            self,
            'TcaTargetGroup',
            port=8080,
            protocol=elbv2.ApplicationProtocol.HTTP,
            vpc=vpc)

        # Setup ECS Cluster
        ecs_cluster = ecs.Cluster(self,
                                  'ECSCluster',
                                  vpc=vpc,
                                  cluster_name="{0}-{1}".format(
                                      my_service_name, stage))

        # ECS Execution Role - Grants ECS agent to call AWS APIs
        ecs_execution_role = iam.Role(
            self,
            'ECSExecutionRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name="{0}-{1}-execution-role".format(my_service_name, stage))

        # Setup Role Permissions
        ecs_execution_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                    'elasticloadbalancing:DeregisterTargets',
                    'elasticloadbalancing:Describe*',
                    'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                    'elasticloadbalancing:RegisterTargets', 'ec2:Describe*',
                    'ec2:AuthorizeSecurityGroupIngress', 'sts:AssumeRole',
                    'ssm:GetParameters', 'secretsmanager:GetSecretValue',
                    'ecr:GetAuthorizationToken',
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                    'logs:CreateLogStream', 'logs:PutLogEvents',
                    "application-autoscaling:*", "cloudwatch:DescribeAlarms",
                    "cloudwatch:PutMetricAlarm"
                ],
                resources=["*"]))

        # ECS Task Role - Grants containers in task permission to AWS APIs
        ecs_task_role = iam.Role(
            self,
            'ECSTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name="{0}-{1}-task-role".format(my_service_name, stage))

        # Setup Role Permissions
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'logs:CreateLogStream',
                                    'logs:PutLogEvents', 'dynamodb:Query',
                                    'dynamodb:ListTables',
                                    'secretsmanager:GetSecretValue',
                                    'kms:Decrypt'
                                ],
                                resources=["*"]))

        # Setup API Task Definition
        api_taskdef = ecs.FargateTaskDefinition(
            self,
            'APIFargateTask',
            memory_limit_mib=512,
            cpu=256,
            execution_role=ecs_execution_role,
            task_role=ecs_task_role,
            family="{0}-{1}-api".format(my_service_name, stage))

        # Setup Web Task Definition
        web_taskdef = ecs.FargateTaskDefinition(
            self,
            'WebFargateTask',
            memory_limit_mib=512,
            cpu=256,
            execution_role=ecs_execution_role,
            task_role=ecs_task_role,
            family="{0}-{1}-web".format(my_service_name, stage))

        # Setup TCA Task Definition
        tca_taskdef = ecs.FargateTaskDefinition(
            self,
            'TcaFargateTask',
            memory_limit_mib=512,
            cpu=256,
            execution_role=ecs_execution_role,
            task_role=ecs_task_role,
            family="{0}-{1}-tca".format(my_service_name, stage))

        api_repo = ecr.Repository.from_repository_arn(
            self,
            'ApiImage',
            repository_arn=
            "arn:aws:ecr:us-east-1:346147488134:repository/oculus-api")

        web_repo = ecr.Repository.from_repository_arn(
            self,
            'WebImage',
            repository_arn=
            "arn:aws:ecr:us-east-1:346147488134:repository/oculus-web")

        tca_repo = ecr.Repository.from_repository_arn(
            self,
            'TcaImage',
            repository_arn=
            "arn:aws:ecr:us-east-1:346147488134:repository/oculus-tca-api")

        # Add Container API to Task
        api_container = api_taskdef.add_container(
            "oculus-cdk-{}-api".format(stage),
            image=ecs.EcrImage(repository=api_repo, tag="devqaurl"),
            logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-api".format(
                my_service_name, stage),
                                           log_group=log_group))

        # Add Container Web to Task
        web_container = web_taskdef.add_container(
            "oculus-cdk-{}-web".format(stage),
            image=ecs.EcrImage(repository=web_repo, tag="removeMetaMockup"),
            logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-web".format(
                my_service_name, stage),
                                           log_group=log_group))

        # Add Container TCA to Task
        tca_container = tca_taskdef.add_container(
            "oculus-cdk-{}-tca".format(stage),
            image=ecs.EcrImage(repository=tca_repo, tag="ocu-1109"),
            logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-tca".format(
                my_service_name, stage),
                                           log_group=log_group))

        # Setup API Port Mappings
        api_container.add_port_mappings(
            ecs.PortMapping(container_port=8080,
                            host_port=8080,
                            protocol=ecs.Protocol.TCP))

        # Setup Web Port Mappings
        web_container.add_port_mappings(
            ecs.PortMapping(container_port=3030,
                            host_port=3030,
                            protocol=ecs.Protocol.TCP))

        # Setup TCA Port Mappings
        tca_container.add_port_mappings(
            ecs.PortMapping(container_port=8080,
                            host_port=8080,
                            protocol=ecs.Protocol.TCP))

        # Setup API Fargate Service
        api_service = ecs.FargateService(self,
                                         "FargateServiceAPI",
                                         task_definition=api_taskdef,
                                         cluster=ecs_cluster,
                                         desired_count=1,
                                         service_name="{0}-{1}-api".format(
                                             my_service_name, stage))

        api_scaling = api_service.auto_scale_task_count(max_capacity=5)
        api_scaling.scale_on_cpu_utilization('ApiCpuScaling',
                                             target_utilization_percent=50)

        # Setup Web Fargate Service
        web_service = ecs.FargateService(self,
                                         "FargateServiceWeb",
                                         task_definition=web_taskdef,
                                         cluster=ecs_cluster,
                                         desired_count=1,
                                         service_name="{0}-{1}-web".format(
                                             my_service_name, stage))

        web_scaling = web_service.auto_scale_task_count(max_capacity=5)
        web_scaling.scale_on_cpu_utilization('WebCpuScaling',
                                             target_utilization_percent=50)

        # Setup TCA Fargate Service
        tca_service = ecs.FargateService(self,
                                         "FargateServiceTCA",
                                         task_definition=tca_taskdef,
                                         cluster=ecs_cluster,
                                         desired_count=1,
                                         service_name="{0}-{1}-tca".format(
                                             my_service_name, stage))

        tca_scaling = tca_service.auto_scale_task_count(max_capacity=5)
        tca_scaling.scale_on_cpu_utilization('TcaCpuScaling',
                                             target_utilization_percent=50)

        # Setup ALB Listener
        alb_listener = alb.add_listener(
            'Listener',
            certificate_arns=[
                "arn:aws:acm:us-east-1:829809672214:certificate/a84bb369-03ce-4e5e-9d32-8c84609cad1e"
            ],
            port=443,
            open=False,
            protocol=elbv2.ApplicationProtocol.HTTPS)

        # Attach ALB to ECS API Service
        api_target = alb_listener.add_targets(
            'ECSAPI',
            port=8080,
            priority=1,
            targets=[api_service],
            health_check=elbv2.HealthCheck(path=api_health_path),
            path_pattern='/oculus-api/*')

        # Attach ALB to ECS TCA Service
        tca_target = alb_listener.add_targets(
            'ECSTCA',
            port=8080,
            priority=2,
            targets=[tca_service],
            health_check=elbv2.HealthCheck(path=tca_health_path),
            path_pattern='/tca/*')

        # Attach ALB to ECS Web Service
        web_target = alb_listener.add_targets(
            'ECSWeb',
            port=3030,
            protocol=elbv2.ApplicationProtocol.HTTP,
            targets=[web_service],
            health_check=elbv2.HealthCheck(path='/'),
        )

        core.CfnOutput(self,
                       'LoadBalancerDNS',
                       value=alb.load_balancer_dns_name)

        zone = route53.HostedZone.from_lookup(self,
                                              'MyHostedZone',
                                              domain_name=props['zoneDomain'])

        route53.ARecord(
            self,
            'ServiceAliasRecord',
            record_name=props['siteDomain'],
            target=route53.RecordTarget(
                alias_target=aws_route53_targets.LoadBalancerTarget(
                    load_balancer=alb)),
            zone=zone)
Example No. 30
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, repository: ecr.Repository, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        namespace = servicediscovery.PrivateDnsNamespace(
            scope=self,
            id="PRIVATE-DNS",
            vpc=vpc,
            name="private",
            description="a private dns"
        )

        sg = ec2.SecurityGroup(
            scope=self,
            id="SG",
            vpc=vpc,
            allow_all_outbound=True,
            description="open 9200 and 9300 ports",
            security_group_name="es-group"
        )
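        # 9200 is the Elasticsearch REST API port; 9300 is the inter-node transport
        # port the second service below uses to join the cluster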
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9200),
        )
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9300),
        )

        #####################################################
        elastic_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="ES-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        elastic = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=30),
            task_definition=elastic_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                # "discovery.zen.ping.unicast.hosts": "elasticsearch",
                "node.name": constants.ES_CONTAINER_NAME,
                "node.master": "true",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.AwsLogDriver(
                stream_prefix="ES",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65535, soft_limit=65535))
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))

        elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
        elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

        elastic.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))
        # elastic.add_volumes_from(ecs.VolumeFrom(
        #     source_container="esdata",
        #     read_only=False,
        #     ))

        es_service = ecs.Ec2Service(
            scope=self,
            id="ES-SERVICE",
            cluster=cluster,
            task_definition=elastic_task_def,
            desired_count=1,
            service_name="ES",
            security_group=sg,
        )

        es_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="ES-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        es_listener = es_lb.add_listener(
            id="ES-LISTENER",
            port=80,
        )
        es_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="ES-GRP",
                container_name=elastic.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=es_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))

        service = es_service.enable_cloud_map(
            cloud_map_namespace=namespace,
            dns_record_type=servicediscovery.DnsRecordType.A,
            # dns_ttl=core.Duration.seconds(amount=30),
            failure_threshold=1,
            name="elastic",
        )
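        # the service is registered as "elastic" in the "private" namespace, so Cloud
        # Map publishes it at "elastic.private", the discovery host the data node uses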

        core.CfnOutput(
            scope=self,
            id="DNS-ES",
            value=es_lb.load_balancer_dns_name,
        )

        #####################################################

        node_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="NODE-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        node = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_NODE_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=40),
            task_definition=node_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                "discovery.zen.ping.unicast.hosts": "elastic.private",
                "node.name": constants.ES_NODE_CONTAINER_NAME,
                "node.master": "false",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="NODE",
                log_retention=logs.RetentionDays.ONE_DAY,
            ))

        node.add_port_mappings(ecs.PortMapping(container_port=9200))
        node.add_port_mappings(ecs.PortMapping(container_port=9300))

        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65536, soft_limit=65536))
        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))
        node.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))

        node_service = ecs.Ec2Service(
            scope=self,
            id="ES-NODE-SERVICE",
            cluster=cluster,
            task_definition=node_task_def,
            desired_count=1,
            service_name="NODE",
            security_group=sg,
        )

        node_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="NODE-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        node_listener = node_lb.add_listener(
            id="NODE-LISTENER",
            port=80,
        )
        node_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="NODE-GRP",
                container_name=node.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=node_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))
        core.CfnOutput(
            scope=self,
            id="DNS-NODE",
            value=node_lb.load_balancer_dns_name,
        )