Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create a cluster
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "sample-app",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

        core.CfnOutput(
            self,
            "LoadBalancerDNS",
            value=fargate_service.load_balancer.load_balancer_dns_name)
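These snippets are excerpts, so their import lines are not shown. Judging by the module aliases used above, the CDK v1 examples assume imports along these lines (an assumption, not part of the original source):

    from aws_cdk import core
    import aws_cdk.aws_ec2 as ec2
    import aws_cdk.aws_ecs as ecs
    import aws_cdk.aws_ecs_patterns as ecs_patterns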
Example #2
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ECS cluster for fargate
        cluster = ecs.Cluster(self, "ECS", vpc=props['vpc'])

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "Service",
            cluster=cluster,
            desired_count=2,
            task_image_options={
                'image': ecs.ContainerImage.from_ecr_repository(props['ecr'])
            })

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=4)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )
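Example #2 reads its dependencies from a props dict. A minimal caller sketch under that assumption (stack and repository names are hypothetical, and aws_cdk.aws_ecr is assumed imported as ecr; only the 'vpc' and 'ecr' keys are actually read by the constructor):

    # Hypothetical wiring for Example #2
    props = {
        'vpc': ec2.Vpc(network_stack, "Vpc", max_azs=2),
        'ecr': ecr.Repository.from_repository_name(network_stack, "Repo", "sample-app"),
    }
    FargateServiceStack(app, "FargateServiceStack", props)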
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create VPC and Fargate Cluster
        # NOTE: Limit AZs to avoid reaching resource quotas
        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, 'Ec2Cluster',
            vpc=vpc
        )

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self, "FargateService",
            cluster=cluster,
            task_image_options={
                'image': ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            }
        )

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=fargate_service.load_balancer.load_balancer_dns_name
        )
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create VPC and Fargate Cluster
        # NOTE: Limit AZs to avoid reaching resource quotas
        vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "FargateService",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(80),
                                description="Allow http inbound from VPC")

        core.CfnOutput(
            self,
            "LoadBalancerDNS",
            value=fargate_service.load_balancer.load_balancer_dns_name)
Example #5
    def create_fargate_NLB_autoscaling(self, vpc):
        cluster = ecs.Cluster(
            self, 'fargate-service-autoscaling',
            vpc=vpc
        )

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self, "sample-app",
            cluster=cluster,
            task_image_options={
                'image': ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            }
        )

        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection = ec2.Port.tcp(80),
            description="Allow http inbound from VPC"
        )

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(
            max_capacity=2
        )
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )
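Example #5 factors the pattern into a helper method; presumably it is invoked from the enclosing stack's __init__, roughly like this sketch (the wiring is assumed):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)
        self.create_fargate_NLB_autoscaling(vpc)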
Example #6
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create a cluster
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "sample-app",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(80),
                                description="Allow http inbound from VPC")

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=fargate_service.load_balancer.load_balancer_dns_name)
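Unlike the earlier snippets, Example #6 references Construct, Duration and CfnOutput without a core. prefix, which points at the CDK v2 API surface; under that assumption its imports would be:

    from constructs import Construct
    from aws_cdk import CfnOutput, Duration, Stack
    import aws_cdk.aws_ec2 as ec2
    import aws_cdk.aws_ecs as ecs
    import aws_cdk.aws_ecs_patterns as ecs_patterns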
Example #7
    def FargateTasks(self, cluster, svcname, svcvalue, env_var):
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "Fargate-" + svcname,
            cluster=cluster,
            task_image_options={
                'image': ecs.ContainerImage.from_registry(svcvalue['image']),
                'container_port': svcvalue.get('port', 80),
                'container_name': svcname,
                'enable_logging': True,
                'environment': env_var,
                # 'secrets' would additionally take a mapping of env var
                # names to ecs.Secret values, not plain strings.
            },
        )
        scaling = fargate_service.service.auto_scale_task_count(
            min_capacity=2,
            max_capacity=5,
        )
        scaling.scale_on_cpu_utilization(
            "CpuScaling" + svcname,
            policy_name="CpuScaling" + svcname,
            target_utilization_percent=10,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )
        scaling.scale_on_memory_utilization(
            "MemoryScaling" + svcname,
            policy_name="MemoryScaling" + svcname,
            target_utilization_percent=60,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

        return self.FargateOutput(svcname, fargate_service)
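The FargateOutput helper that Example #7 returns is not shown in the excerpt. A plausible minimal body, purely a reconstruction that mirrors the CfnOutput pattern of the other examples, would be:

    def FargateOutput(self, svcname, fargate_service):
        # Hypothetical reconstruction; the original helper is not shown.
        return core.CfnOutput(
            self,
            "LoadBalancerDNS-" + svcname,
            value=fargate_service.load_balancer.load_balancer_dns_name)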
Example #8
    def _create_mlflow_server(self):
        """
        Create a Fargate task for the MLflow server
        """
        cluster = ecs.Cluster(scope=self, id="CLUSTER", cluster_name=self.cluster_name, vpc=self.vpc)

        task_id = f"{self.stack_name}-{self.component_id}-MLflow"
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=task_id,
            task_role=self.role,
        )

        container_id = f"{self.stack_name}-{self.component_id}-container"
        container = task_definition.add_container(
            id=container_id,
            image=ecs.ContainerImage.from_asset(
                directory="cdk_ml_cicd_pipeline/resources/visualization/mlflow/container",
            ),
            environment={
                "BUCKET": f"s3://{self.artifact_bucket.bucket_name}",
                "HOST": self.database.db_instance_endpoint_address,
                "PORT": str(self.port),
                "DATABASE": self.dbname,
                "USERNAME": self.username,
            },
            secrets={"PASSWORD": ecs.Secret.from_secrets_manager(self.db_password_secret)},
            logging=ecs.LogDriver.aws_logs(stream_prefix='mlflow')
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service_id = f"{self.stack_name}-{self.component_id}-" + "mlflow-fargate"
        self.fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id=fargate_service_id,
            service_name=self.service_name,
            cluster=cluster,
            task_definition=task_definition,
        )

        # Setup security group
        self.fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description="Allow inbound from VPC for mlflow",
        )

        # Setup autoscaling policy
        autoscaling_policy_id = f"{self.stack_name}-{self.component_id}-" + "autoscaling-policy"
        scaling = self.fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id=autoscaling_policy_id,
            target_utilization_percent=70,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )
Example #9
    def __init__(self, scope: core.Stack, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create VPC
        self.vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_name='api-gateway/VPC')

        # Create ECS Cluster
        self.ecs_cluster = ecs.Cluster(self, "ECSCluster", vpc=self.vpc)

        # This high-level construct builds a Docker image and an ECR repo, and grants the ECS service pull access
        self.container_image = ecr.DockerImageAsset(self,
                                                    "Image",
                                                    directory="./")

        # Task definition details to define the frontend service container
        self.task_def = ecs_patterns.NetworkLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_ecr_repository(
                repository=self.container_image.repository),
            container_port=80,
            enable_logging=True,
            environment={"GIT_HASH": "12345"},
        )

        # Create the frontend service
        self.python_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "PythonService",
            cpu=256,
            memory_limit_mib=512,
            cluster=self.ecs_cluster,
            desired_count=1,
            task_image_options=self.task_def,
            public_load_balancer=False,
        )

        self.python_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.ALL,
                string_representation="All port 80",
                from_port=80,
            ),
            description="Allows traffic on port 80 from NLB")

        # Create VPC Link from API Gateway to NLB
        # TODO: Make api id dynamic
        self.rest_api = apigw.RestApi.from_rest_api_id(
            self, "APIGateway", rest_api_id="6znhu1vqp6")

        # TODO: Create stage variable for vpc links
        self.gateway_vpc_link = apigw.VpcLink(
            self,
            "VPCLink",
            description="VPC Link from API Gateway to ECS Python Service",
            targets=[self.python_service.load_balancer],
            vpc_link_name="ECS_VPC_LINK")
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

## Create S3 bucket. We'll populate it separately.
        bucket_name="{}-s3-ecs-apigw-test".format(core.Aws.ACCOUNT_ID)
        bucket = s3.Bucket(self, "s3-ecs-apigw-test",
            bucket_name=bucket_name,
            versioned=True,
            public_read_access=False
            )

## Create ECS cluster, task and service
### Create the VPC for the demo
        vpc = ec2.Vpc(self, "MyVpc", max_azs=3)

### Create the ECS Cluster
        cluster = ecs.Cluster(self,
            "ecs-apigw-test-cluster",
            cluster_name="ecs-apigw-test-cluster",
            container_insights=True,
            vpc=vpc)
### Using the Network Load Balanced Fargate pattern; this will create the container definition, the task definition, the service and the Network Load Balancer for it.
        ecs_deploy = ecsp.NetworkLoadBalancedFargateService(self, 
            "ecs-apigw-test",
            cluster=cluster,
            cpu=512,
            desired_count=2,
            public_load_balancer=False,
            memory_limit_mib=2048,
            task_image_options=ecsp.NetworkLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("strm/helloworld-http")
            ),
            health_check_grace_period=core.Duration.minutes(5)
            )
### We have to add an ingress rule to allow traffic through; it admits the CIDR of the VPC.
        ecs_deploy.service.connections.security_groups[0].add_ingress_rule(
            ec2.Peer.ipv4(vpc.vpc_cidr_block),
            ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="",
                from_port=80,
                to_port=80
            )
        )
## Create API Gateway resources

### Create the VPC Link to the Network Load Balancer
        vpc_link = apigw.VpcLink(self,
            "ecs-test-vpc-link",
            targets = [ecs_deploy.load_balancer])
### Create the API
        api = apigw.RestApi(self, "ecs-s3-test-api",
                  rest_api_name="ECS S3 Test API",
                  description="Test API for distributing traffic to S3 and ECS",
                  binary_media_types=["image/png"])
### Create the execution role for the API methods. 
        rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess")])

### Generic Method Response that can be used with each API method
        method_response = apigw.MethodResponse(status_code="200",
            response_parameters={"method.response.header.Content-Type": True})

### Root URI
        root_integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'text/html'"})
        root_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[root_integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'"})
        root_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="index.html",
            options=root_integration_options)

 
        root_method = api.root.add_method("GET",
            root_resource_s3_integration,
            method_responses=[method_response])


### API URI
        api_integration = apigw.Integration(
            type=apigw.IntegrationType.HTTP_PROXY,
            uri="http://{}".format(ecs_deploy.load_balancer.load_balancer_dns_name),
            integration_http_method="GET",
            options={
                "connection_type": apigw.ConnectionType.VPC_LINK,
                "vpc_link": vpc_link
            }
        )
        apis = api.root.add_resource("apis")
        apii = apis.add_resource("{api}")
        # apis = api.root.add_resource("apis")
        apii_get = apii.add_method("GET",
            api_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.api": True,})

## Add Images URI
        image_integration_response = apigw.IntegrationResponse(
            status_code="200",
            content_handling=apigw.ContentHandling.CONVERT_TO_BINARY,
            # response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'image/png'"})
        image_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[image_integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'",
                "integration.request.path.image": "method.request.path.image"})
        images_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="images/{image}",
            options=image_integration_options)
        images_resource = api.root.add_resource("images")
        image_resource = images_resource.add_resource("{image}")
        images_get = image_resource.add_method("GET",
            images_resource_s3_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.image": True,})

## Fall Through
        folder = api.root.add_resource("{folder}")
        item = folder.add_resource("{item}")
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'text/html'"})
        s3_proxy_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'",
                "integration.request.path.item": "method.request.path.item",
                "integration.request.path.folder": "method.request.path.folder"})
        s3_proxy_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="{folder}/{item}",
            options=s3_proxy_integration_options)
        item_get = item.add_method("GET",
            s3_proxy_resource_s3_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.item": True,
                "method.request.path.folder": True
            }
            )
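The bucket in Example #10 is populated separately, as its first comment notes. One common way to do that from the same stack is the aws_s3_deployment module; a sketch, assuming the site content lives in a local ./site-content directory:

    import aws_cdk.aws_s3_deployment as s3deploy

    s3deploy.BucketDeployment(self, "DeploySiteContent",
        sources=[s3deploy.Source.asset("./site-content")],
        destination_bucket=bucket)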
Example #11
    def appmesh(self):

        # This will create the app mesh (control plane)
        self.mesh = aws_appmesh.Mesh(self,
                                     "EcsWorkShop-AppMesh",
                                     mesh_name="ecs-mesh")

        # We will create an App Mesh Virtual Gateway
        self.mesh_vgw = aws_appmesh.VirtualGateway(
            self,
            "Mesh-VGW",
            mesh=self.mesh,
            listeners=[aws_appmesh.VirtualGatewayListener.http(port=3000)],
            virtual_gateway_name="ecsworkshop-vgw")

        # Creating the mesh gateway task for the frontend app
        # For more info related to App Mesh Proxy check https://docs.aws.amazon.com/app-mesh/latest/userguide/getting-started-ecs.html
        self.mesh_gw_proxy_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "mesh-gw-proxy-taskdef",
            cpu=256,
            memory_limit_mib=512,
            family="mesh-gw-proxy-taskdef",
        )

        # LogGroup for the App Mesh Proxy Task
        self.logGroup = aws_logs.LogGroup(
            self,
            "ecsworkshopMeshGateway",
            #log_group_name="ecsworkshop-mesh-gateway",
            retention=aws_logs.RetentionDays.ONE_WEEK)

        # App Mesh Virtual Gateway Envoy proxy Task definition
        # To use a region-specific ECR image, see https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy.html
        container = self.mesh_gw_proxy_task_def.add_container(
            "mesh-gw-proxy-contdef",
            image=aws_ecs.ContainerImage.from_registry(
                "public.ecr.aws/appmesh/aws-appmesh-envoy:v1.18.3.0-prod"),
            container_name="envoy",
            memory_reservation_mib=256,
            environment={
                "REGION": getenv('AWS_DEFAULT_REGION'),
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                # "ENABLE_ENVOY_XRAY_TRACING": "1",
                "APPMESH_RESOURCE_ARN": self.mesh_vgw.virtual_gateway_arn
            },
            essential=True,
            logging=aws_ecs.LogDriver.aws_logs(stream_prefix='/mesh-gateway',
                                               log_group=self.logGroup),
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE"
            ], ))

        # Default port where frontend app is listening
        container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))

        #appmesh-xray-uncomment
        # xray_container = self.mesh_gw_proxy_task_def.add_container(
        #     "FrontendServiceXrayContdef",
        #     image=aws_ecs.ContainerImage.from_registry("amazon/aws-xray-daemon"),
        #     logging=aws_ecs.LogDriver.aws_logs(
        #         stream_prefix='/xray-container',
        #         log_group=self.logGroup
        #     ),
        #     essential=True,
        #     container_name="xray",
        #     memory_reservation_mib=256,
        #     user="******"
        # )

        # container.add_container_dependencies(aws_ecs.ContainerDependency(
        #       container=xray_container,
        #       condition=aws_ecs.ContainerDependencyCondition.START
        #   )
        # )
        #appmesh-xray-uncomment

        # For environment variables check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy-config.html
        self.mesh_gateway_proxy_fargate_service = aws_ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "MeshGW-Proxy-Fargate-Service",
            service_name='mesh-gw-proxy',
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            listener_port=80,
            assign_public_ip=True,
            task_definition=self.mesh_gw_proxy_task_def,
            cluster=self.ecs_cluster,
            public_load_balancer=True,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=self.ecs_cluster.
                default_cloud_map_namespace,
                name='mesh-gw-proxy'))

        # For testing purposes we allow any IPv4 address to reach port 3000
        self.mesh_gateway_proxy_fargate_service.service.connections.allow_from_any_ipv4(
            port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation="vtw_proxy",
                                    from_port=3000,
                                    to_port=3000),
            description="Allow NLB connections on port 3000")

        self.mesh_gw_proxy_task_def.default_container.add_ulimits(
            aws_ecs.Ulimit(hard_limit=15000,
                           name=aws_ecs.UlimitName.NOFILE,
                           soft_limit=15000))

        #Adding necessary policies for Envoy proxy to communicate with required services
        self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"))
        self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"))

        self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"))
        # self.mesh_gw_proxy_task_def.task_role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AWSXRayDaemonWriteAccess"))
        self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSAppMeshEnvoyAccess"))

        self.mesh_gw_proxy_task_def.execution_role.add_to_policy(
            aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                    resources=['*']))

        core.CfnOutput(self,
                       "MeshGwNlbDns",
                       value=self.mesh_gateway_proxy_fargate_service.
                       load_balancer.load_balancer_dns_name,
                       export_name="MeshGwNlbDns")
        core.CfnOutput(self,
                       "MeshArn",
                       value=self.mesh.mesh_arn,
                       export_name="MeshArn")
        core.CfnOutput(self,
                       "MeshName",
                       value=self.mesh.mesh_name,
                       export_name="MeshName")
        core.CfnOutput(
            self,
            "MeshEnvoyServiceArn",
            value=self.mesh_gateway_proxy_fargate_service.service.service_arn,
            export_name="MeshEnvoyServiceArn")
        core.CfnOutput(self,
                       "MeshVGWArn",
                       value=self.mesh_vgw.virtual_gateway_arn,
                       export_name="MeshVGWArn")
        core.CfnOutput(self,
                       "MeshVGWName",
                       value=self.mesh_vgw.virtual_gateway_name,
                       export_name="MeshVGWName")
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc(self, "SampleVPC",
                      max_azs=2)  # default is all AZs in region
        cluster = ecs.Cluster(self, "ServiceCluster", vpc=vpc)
        cluster.add_default_cloud_map_namespace(name="service.local")

        # two docker containers
        # two ECS services/tasks

        frontend_asset = DockerImageAsset(self,
                                          "frontend",
                                          directory="./frontend",
                                          file="Dockerfile")
        frontend_task = ecs.FargateTaskDefinition(
            self,
            "frontend-task",
            cpu=512,
            memory_limit_mib=2048,
        )
        frontend_task.add_container(
            "frontend",
            image=ecs.ContainerImage.from_docker_image_asset(frontend_asset),
            essential=True,
            environment={
                "LOCALDOMAIN": "service.local"
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="FrontendContainer",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
        ).add_port_mappings(
            ecs.PortMapping(container_port=5000, host_port=5000))

        backend_task = ecs.FargateTaskDefinition(
            self,
            "backend-task",
            cpu=512,
            memory_limit_mib=2048,
        )
        backend_task.add_container(
            "backend",
            image=ecs.ContainerImage.from_registry("redis:alpine"),
            essential=True,
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="BackendContainer",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
        ).add_port_mappings(
            ecs.PortMapping(container_port=6379, host_port=6379))

        frontend_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            id="frontend-service",
            service_name="frontend",
            cluster=cluster,  # Required
            cloud_map_options=ecs.CloudMapOptions(name="frontend"),
            cpu=512,  # Default is 256
            desired_count=2,  # Default is 1
            task_definition=frontend_task,
            memory_limit_mib=2048,  # Default is 512
            listener_port=80,
            public_load_balancer=True,
        )

        frontend_service.service.connections.allow_from_any_ipv4(
            ec2.Port.tcp(5000), "flask inbound")

        backend_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            id="backend-service",
            service_name="backend",
            cluster=cluster,  # Required
            cloud_map_options=ecs.CloudMapOptions(name="backend"),
            cpu=512,  # Default is 256
            desired_count=2,  # Default is 1
            task_definition=backend_task,
            memory_limit_mib=2048,  # Default is 512
            listener_port=6379,
            public_load_balancer=False,
        )

        backend_service.service.connections.allow_from(
            frontend_service.service, ec2.Port.tcp(6379))
Example #13
class MLflowStack(core.Stack):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        ##
        ## General parameters used to provision the infrastructure
        ##
        project_name_param = core.CfnParameter(scope=self, id='mlflowStack', type='String', default='mlflowStack')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = 'mlflowbucket-track-stack'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # Attach the managed policies to the role that will be assigned to the ECS task.
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # Secrets Manager secret that stores the password of our RDS MySQL instance
        db_password_secret = sm.Secret(
            scope=self,
            id='dbsecret',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )

        # Create the S3 bucket
        artifact_bucket = s3.Bucket(
            scope=self,
            id='mlflowstacktrack',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Look up the VPC to attach to the RDS instance
        dev_vpc = ec2.Vpc.from_vpc_attributes(
            self, '<VPC_NAME>',
            vpc_id = "<VPC_ID>",
            availability_zones = core.Fn.get_azs(),
            private_subnet_ids = ["PRIVATE_SUBNET_ID_1","PRIVATE_SUBNET_ID_2","PRIVATE_SUBNET_ID_3"]
        )

        # For testing purposes we allow 0.0.0.0/0 here
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', security_group_name='sg_rds', vpc=dev_vpc)
        
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'), connection=ec2.Port.tcp(port))

        # Create the RDS instance
        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),            
            security_groups=[sg_rds],
            vpc=dev_vpc,            
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )

        # Create the ECS cluster
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name)
        # Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )
        # Build our container from the MLflow Dockerfile
        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='../MLflow/container',
                repository_name=container_repo_name
            ),
            # Environment variables
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            # Secret containing the RDS MySQL password
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        # Port mapping to expose the MLflow container
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Security group for ingress
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4('0.0.0.0/0'),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound for mlflow'
        )

        # Auto scaling policy for our load balancer
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
     
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
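A stack such as MLflowStack is synthesized from a CDK app entry point; the wiring is not part of the excerpt, but a minimal sketch looks like:

    app = core.App()
    MLflowStack(app, "mlflowStack")
    app.synth()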
Example #14
    def create_fargate_NLB_autoscaling_custom(self, vpc, **kwargs):
        ####################
        # Unpack Value for name/ecr_repo
        app_name = kwargs['function'].replace("_", "-")
        task_name = "{}-task-definition".format(app_name)
        log_name = app_name
        image_name = "{}-image".format(app_name)
        container_name = "{}-container".format(app_name)
        service_name = "{}-service".format(app_name)

        app_ecr = kwargs['ecr']
        ecs_role = kwargs['ecs_role']

        ####################
        # Create Cluster
        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        ####################
        # Config IAM Role
        # add managed policy statement
        # ecs_base_role = iam.Role(
        #     self,
        #     "ecs_service_role",
        #     assumed_by=iam.ServicePrincipal("ecs.amazonaws.com")
        # )
        # ecs_role = ecs_base_role.from_role_arn(self, 'gw-ecr-role-test', role_arn='arn:aws:iam::002224604296:role/ecsTaskExecutionRole')

        ####################
        # Create Fargate Task Definition
        fargate_task = ecs.FargateTaskDefinition(self,
                                                 task_name,
                                                 execution_role=ecs_role,
                                                 task_role=ecs_role,
                                                 cpu=2048,
                                                 memory_limit_mib=8192)
        # 0. config log
        ecs_log = ecs.LogDrivers.aws_logs(stream_prefix=log_name)
        # 1. prepare ecr repository
        ecr_repo = ecr.Repository.from_repository_name(self,
                                                       id=image_name,
                                                       repository_name=app_ecr)
        farget_container = fargate_task.add_container(
            container_name,
            image=ecs.ContainerImage.from_ecr_repository(ecr_repo),
            logging=ecs_log,
            environment={
                'KG_PATH': "s3://autorec-1",
                "REDIS_URL": self.redis_host,
                "REDIS_PORT": self.redis_port
            })
        # 2. config port mapping
        port_mapping = ecs.PortMapping(container_port=9008,
                                       host_port=9008,
                                       protocol=ecs.Protocol.TCP)
        farget_container.add_port_mappings(port_mapping)

        ####################
        # Config NLB service
        # fargate_service = ecs.FargateService(self, 'graph-inference-service',
        #     cluster=cluster, task_definition=fargate_task, assign_public_ip=True
        # )
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            service_name,
            cluster=cluster,
            task_definition=fargate_task,
            assign_public_ip=True,
            desired_count=20,
            listener_port=9008)
        # 0. allow inbound in sg
        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(
                # peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
                peer=ec2.Peer.ipv4('0.0.0.0/0'),
                connection=ec2.Port.tcp(9008),
                description="Allow http inbound from VPC")

        # 1. setup autoscaling policy (auto scaling, kept commented out below)
        #         scaling = fargate_service.service.auto_scale_task_count(
        #             max_capacity=50
        #         )
        #         scaling.scale_on_cpu_utilization(
        #             "CpuScaling",
        #             target_utilization_percent=50,
        #             scale_in_cooldown=core.Duration.seconds(60),
        #             scale_out_cooldown=core.Duration.seconds(60),
        #         )

        return fargate_service.load_balancer.load_balancer_dns_name
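Example #14 returns the NLB's DNS name rather than emitting an output itself. A caller could surface it like this (a sketch; the kwarg values are invented, only the keys 'function', 'ecr' and 'ecs_role' are read by the method):

    dns = self.create_fargate_NLB_autoscaling_custom(
        vpc,
        function="graph_inference",    # hypothetical
        ecr="graph-inference-repo",    # hypothetical
        ecs_role=ecs_role)             # an existing IAM role for the task
    core.CfnOutput(self, "InferenceServiceDNS", value=dns)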
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # ==============================
        # ======= CFN PARAMETERS =======
        # ==============================
        project_name_param = core.CfnParameter(scope=self, id='ProjectName', type='String')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = f'{project_name_param.value_as_string}-artifacts-{core.Aws.ACCOUNT_ID}'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # ==================================================
        # ================= IAM ROLE =======================
        # ==================================================
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # ==================================================
        # ================== SECRET ========================
        # ==================================================
        db_password_secret = sm.Secret(
            scope=self,
            id='DBSECRET',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )
        

        # ==================================================
        # ==================== VPC =========================
        # ==================================================
        #public_subnet = ec2.SubnetConfiguration(name='Public', subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=28)
        #dev-shared-public-subnet-az1
        #private_subnet = ec2.SubnetConfiguration(name='Private', subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=28)
        #dev-shared-private-subnet-az1
        #isolated_subnet = ec2.SubnetConfiguration(name='DB', subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=28) 
        #dev-shared-private-subnet-az1

        # use an existing VPC (needed later for Fargate)
        """ vpc = ec2.Vpc(
            scope=self,
            id='VPC',
            cidr='10.0.0.0/24',
            max_azs=2,
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=1,
            subnet_configuration=[public_subnet, private_subnet, isolated_subnet]
        ) """

        """ stack = MyStack(
            app, "MyStack", env=Environment(account="account_id", region="region")
        ) """

        vpc = ec2.Vpc.from_lookup(self, "VPC",
            vpc_id = "vpc-03076add1b1efca31" #is_default=True
        ) #TODO: fill in correct arguments
        #vpc_id = "vpc-03076add1b1efca31"

        # leave as is, should be fine; if not, check (it is not a NAT gateway)

        #original: vpc.add_gateway_endpoint('S3Endpoint', service=ec2.GatewayVpcEndpointAwsService.S3)
        # ==================================================
        # ================= S3 BUCKET ======================
        # ==================================================
        artifact_bucket = s3.Bucket(
            scope=self,
            id='ARTIFACTBUCKET',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # # ==================================================
        # # ================== DATABASE  =====================
        # # ==================================================
        # Creates a security group for AWS RDS
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', vpc=vpc, security_group_name='sg_rds')
        # Adds an ingress rule which allows resources in the VPC's CIDR to access the database.
        #original: sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.0.0.0/24'), connection=ec2.Port.tcp(port))
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.206.192.0/19'), connection=ec2.Port.tcp(port))
        #10.206.192.0/19

        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            security_groups=[sg_rds],
            #vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE), #TODO: check if you need to select private here and how
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )
        # ==================================================
        # =============== FARGATE SERVICE ==================
        # ==================================================
        
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name, vpc=vpc)

        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )

        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='container',
                repository_name=container_repo_name
            ),
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Setup security group
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound from VPC for mlflow'
        )

        # Setup autoscaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
        # ==================================================
        # =================== OUTPUTS ======================
        # ==================================================
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
Example #16
    def __init__(self, scope: cdk.Construct, construct_id: str, vpc,
                 ecr_repository, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create a Fargate Cluster
        self.ecs_cluster = ecs.Cluster(self,
                                       'WebEc2Cluster',
                                       cluster_name='MythicalMysfits-Cluster',
                                       vpc=vpc)

        self.ecs_cluster.connections.allow_from_any_ipv4(ec2.Port.tcp(8080))

        self.ecs_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            'MythicalMysfits-FargateService',
            service_name='MythicalMysfits-FargateService',
            cluster=self.ecs_cluster,
            desired_count=1,
            public_load_balancer=True,
            min_healthy_percent=1,
            task_image_options=ecs_patterns.
            NetworkLoadBalancedTaskImageOptions(
                enable_logging=True,
                container_name='MythicalMysfits-Service',
                container_port=8080,
                image=ecs.ContainerImage.from_ecr_repository(ecr_repository)))

        self.ecs_service.service.connections.allow_from(
            ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(8080))

        task_definition_policy_stm = _iam.PolicyStatement()
        task_definition_policy_stm.add_actions(
            # Rules which allow ECS to attach network interfaces to instances on your behalf in order for awsvpc networking mode to work right
            "ec2:AttachNetworkInterface",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface",
            "ec2:DeleteNetworkInterfacePermission",
            "ec2:Describe*",
            "ec2:DetachNetworkInterface",
            # Rules which allow ECS to update load balancers on your behalf with the information about how to send traffic to your containers
            "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
            "elasticloadbalancing:DeregisterTargets",
            "elasticloadbalancing:Describe*",
            "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
            "elasticloadbalancing:RegisterTargets",
            # Rules which allow ECS to run tasks that have IAM roles assigned to them.
            "iam:PassRole",
            # Rules that let ECS create and push logs to CloudWatch.
            "logs:DescribeLogStreams",
            "logs:CreateLogGroup",
        )
        task_definition_policy_stm.add_all_resources()
        self.ecs_service.service.task_definition.add_to_execution_role_policy(
            task_definition_policy_stm)

        task_role_policy_stm = _iam.PolicyStatement()
        task_role_policy_stm.add_actions(
            # Allow the ECS Tasks to download images from ECR
            "ecr:GetAuthorizationToken",
            "ecr:BatchCheckLayerAvailability",
            "ecr:GetDownloadUrlForLayer",
            "ecr:BatchGetImage",
            # Allow the ECS tasks to upload logs to CloudWatch
            "logs:CreateLogStream",
            "logs:CreateLogGroup",
            "logs:PutLogEvents",
            # Allow the ECS tasks to access the DynamoDB table to populate it upon startup.
            "dynamodb:*")
        task_role_policy_stm.add_all_resources()
        self.ecs_service.service.task_definition.add_to_task_role_policy(
            task_role_policy_stm)

        cdk.CfnOutput(
            self,
            "WebLoadBalancerDNS",
            value=self.ecs_service.load_balancer.load_balancer_dns_name)

        cdk.CfnOutput(self,
                      "WebLoadBalancerARN",
                      value=self.ecs_service.load_balancer.load_balancer_arn)
Example #17
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        dockerImageAsset = DockerImageAsset(
            self,
            f"{APPLICATION_NAME}",
            directory="../",
            file="Dockerfile",
            exclude=["cdk/node_modules", ".git", "cdk/cdk.out"],
        )

        vpc = ec2.Vpc(self, f"{APPLICATION_NAME}VPC", max_azs=3)
        cluster = ecs.Cluster(self, f"{APPLICATION_NAME}Cluster", vpc=vpc)

        app_task = ecs.FargateTaskDefinition(
            self,
            f"{APPLICATION_NAME}-task",
            family=f"{APPLICATION_NAME}-family",
            cpu=512,
            memory_limit_mib=2048,
        )

        dd_api_key = ssm.StringParameter.value_for_string_parameter(
            self, "/datadog/snyk_demo/dd_api_key", 1)

        DATADOG_AGENT_VARS["DD_API_KEY"] = dd_api_key

        java_service_container = app_task.add_container(
            f"{APPLICATION_NAME}-java-app",
            image=ecs.ContainerImage.from_docker_image_asset(dockerImageAsset),
            essential=True,
            docker_labels={
                "com.datadoghq.ad.instances":
                '[{"host": "%%host%%", "port": 6379}]',
                "com.datadoghq.ad.check_names": '["tomcat"]',
                "com.datadoghq.ad.init_configs": "[{}]",
            },
            environment=APP_ENV_VARS,
            logging=ecs.LogDrivers.firelens(
                options={
                    "Name": "datadog",
                    "Host": "http-intake.logs.datadoghq.com",
                    "TLS": "on",
                    "apikey": dd_api_key,
                    "dd_service": DD_SERVICE,
                    "dd_source": "tomcat",
                    "dd_tags": DD_TAGS,
                    "provider": "ecs",
                }),
        )

        datadog_agent_container = app_task.add_container(
            f"{APPLICATION_NAME}-datadog-agent",
            image=ecs.ContainerImage.from_registry(
                name="datadog/agent:latest"),
            essential=True,
            environment=DATADOG_AGENT_VARS,
        )

        # Port exposure for the containerized app
        java_service_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))

        # Mandatory port exposure for the Datadog agent
        datadog_agent_container.add_port_mappings(
            ecs.PortMapping(container_port=8126, host_port=8126))

        app_task_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            id=f"{APPLICATION_NAME}-service",
            service_name=f"{APPLICATION_NAME}",
            cluster=cluster,  # Required
            cpu=512,  # Default is 256
            desired_count=1,  # Default is 1
            task_definition=app_task,
            memory_limit_mib=2048,  # Default is 512
            listener_port=80,
            public_load_balancer=True,
            health_check_grace_period=core.Duration.seconds(120),
        )

        # Security Group to allow load balancer to communicate with ECS Containers.
        app_task_service.service.connections.allow_from_any_ipv4(
            ec2.Port.tcp(8080), f"{APPLICATION_NAME} app inbound")
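Example #17 references several module-level constants (APPLICATION_NAME, DD_SERVICE, DD_TAGS, APP_ENV_VARS, DATADOG_AGENT_VARS) defined outside the excerpt. Placeholder definitions in that spirit, with values invented purely for illustration, might be:

    APPLICATION_NAME = "snyk-demo"                # hypothetical
    DD_SERVICE = "java-app"                       # hypothetical
    DD_TAGS = "env:demo,team:platform"            # hypothetical
    APP_ENV_VARS = {"DD_SERVICE": DD_SERVICE}     # hypothetical
    DATADOG_AGENT_VARS = {"ECS_FARGATE": "true"}  # hypothetical; DD_API_KEY is injected above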
Example #18
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Domain name to redirect
        domain_name = core.CfnParameter(
            self,
            "domainName",
            type="String",
            description="Domain name to redirect",
        )

        # Here we use a specific certificate from parameter values
        cert_arn = core.CfnParameter(
            self,
            "certArn",
            type="String",
            description="Certificate ARN for the redirection (has to be in us-east-1)",
        )

        image_tag = core.CfnParameter(
            self,
            "imageTag",
            type="String",
            description="Image tag to deploy as container",
        )

        processed_bucket_name = core.CfnParameter(
            self,
            "processedBucket",
            type="String",
            description="Name of the S3 bucket holding the processed data",
        )

        cookie_secret = core.CfnParameter(
            self,
            "cookieSecret",
            type="String",
            description="The secret value used to encrypt the dashboard's login cookies.",
        )
        # End: Input variables

        # Create VPC and Fargate Cluster
        # NOTE: Limit AZs to avoid reaching resource quotas
        vpc = ec2.Vpc(self, "DashboardVPC", max_azs=2)

        cluster = ecs.Cluster(self,
                              "DashboardCluster",
                              vpc=vpc,
                              container_insights=True)

        repository = ecr.Repository(self,
                                    "DashboardRepository",
                                    repository_name="nccid-dashboard")
        secret = secretsmanager.Secret(self,
                                       "DashboardSecret",
                                       secret_name="nccid-dashboard-secrets")
        service_secret = ecs.Secret.from_secrets_manager(secret)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "DashboardService",
            cluster=cluster,
            task_image_options={
                "image":
                ecs.ContainerImage.from_ecr_repository(
                    repository=repository, tag=image_tag.value_as_string),
                "secrets": [service_secret],
                "environment": {
                    "AWS_PROCESSED_BUCKET":
                    processed_bucket_name.value_as_string,
                    "COOKIE_SECRET_KEY": cookie_secret.value_as_string,
                    "DASHBOARD_DOMAIN": domain_name.value_as_string,
                },
            },
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            # See values for these entries at https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/NetworkLoadBalancedFargateService.html#networkloadbalancedfargateservice
            cpu=256,  # .25 vCPU
            memory_limit_mib=512,  # 0.5 GB
        )

        processed_bucket = s3.Bucket.from_bucket_name(
            self,
            id="ProcessedBucket",
            bucket_name=processed_bucket_name.value_as_string,
        )
        processed_bucket.grant_read(fargate_service.task_definition.task_role)

        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(
                peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                connection=ec2.Port.tcp(80),
                description="Allow HTTP inbound from VPC",
            )

        cert = _acm.Certificate.from_certificate_arn(self, "cert",
                                                     cert_arn.value_as_string)

        origin = _origins.HttpOrigin(
            domain_name=fargate_service.load_balancer.load_balancer_dns_name,
            protocol_policy=_cloudfront.OriginProtocolPolicy.HTTP_ONLY,
        )
        behaviour = _cloudfront.BehaviorOptions(
            origin=origin,
            allowed_methods=_cloudfront.AllowedMethods.ALLOW_ALL,
            cache_policy=_cloudfront.CachePolicy.CACHING_DISABLED,
            origin_request_policy=_cloudfront.OriginRequestPolicy.ALL_VIEWER,
            viewer_protocol_policy=_cloudfront.ViewerProtocolPolicy.
            REDIRECT_TO_HTTPS,
        )

        distribution = _cloudfront.Distribution(
            self,
            "nccid-dasboard-dist",
            default_behavior=behaviour,
            certificate=cert,
            domain_names=[domain_name.value_as_string],
        )
        # Explicit dependency setup
        distribution.node.add_dependency(fargate_service.load_balancer)

        core.CfnOutput(
            self,
            "nccidCloudfrontDistribution",
            value=distribution.distribution_domain_name,
            description="Cloudfront domain to set the CNAME for.",
        )
Beispiel #19
    def __init__(self, scope: core.Construct, id: str, props: EcsStackProps,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self._ecs_service = None

        self._ecs_cluster = aws_ecs.Cluster(
            self,
            "Cluster",
            cluster_name="MythicalMysfits-Cluster",
            vpc=props.vpc)
        self._ecs_cluster.connections.allow_from_any_ipv4(
            aws_ec2.Port.tcp(8080))

        task_image_options = aws_ecs_patterns.NetworkLoadBalancedTaskImageOptions(
            container_port=8080,
            image=aws_ecs.ContainerImage.from_ecr_repository(
                props.ecr_repository),
        )
        self._ecs_service = aws_ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "Service",
            cluster=self._ecs_cluster,
            task_image_options=task_image_options,
        )
        self._ecs_service.service.connections.allow_from(
            aws_ec2.Peer.ipv4(props.vpc.vpc_cidr_block),
            aws_ec2.Port.tcp(8080))

        task_definition_policy = aws_iam.PolicyStatement()
        task_definition_policy.add_actions(
            # Rules which allow ECS to attach network interfaces to instances
            # on your behalf in order for awsvpc networking mode to work right
            "ec2:AttachNetworkInterface",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface",
            "ec2:DeleteNetworkInterfacePermission",
            "ec2:Describe*",
            "ec2:DetachNetworkInterface",
            # Rules which allow ECS to update load balancers on your behalf
            # with the information about how to send traffic to your containers
            "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
            "elasticloadbalancing:DeregisterTargets",
            "elasticloadbalancing:Describe*",
            "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
            "elasticloadbalancing:RegisterTargets",
            # Rules which allow ECS to run tasks that have IAM roles assigned to them.
            "iam:PassRole",
            # Rules that let ECS create and push logs to CloudWatch.
            "logs:DescribeLogStreams",
            "logs:CreateLogGroup",
        )
        task_definition_policy.add_all_resources()
        self._ecs_service.service.task_definition.add_to_execution_role_policy(
            task_definition_policy)

        task_role_policy = aws_iam.PolicyStatement()
        task_role_policy.add_actions(
            # Allow the ECS Tasks to download images from ECR
            "ecr:GetAuthorizationToken",
            "ecr:BatchCheckLayerAvailability",
            "ecr:GetDownloadUrlForLayer",
            "ecr:BatchGetImage",
            # Allow the ECS tasks to upload logs to CloudWatch
            "logs:CreateLogStream",
            "logs:CreateLogGroup",
            "logs:PutLogEvents",
        )
        task_role_policy.add_all_resources()
        self._ecs_service.service.task_definition.add_to_task_role_policy(
            task_role_policy)
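        # Editor's note: the execution-role statement above re-creates
        # permissions AWS ships as a managed policy. A hedged alternative,
        # assuming the pattern created an execution role (it does when the
        # image comes from ECR), attaches AmazonECSTaskExecutionRolePolicy:
        self._ecs_service.service.task_definition.execution_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))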
Beispiel #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc.from_lookup(self,
                                  'covar-vpc',
                                  vpc_name='dev-base-infrastructure/vpc')

        cluster = ecs.Cluster.from_cluster_attributes(
            self,
            'covar-service-cluster',
            cluster_name='covar-service-cluster',
            vpc=vpc,
            security_groups=[])

        lb = elb.ApplicationLoadBalancer(
            self,
            "LoadBalancer",
            vpc=vpc,
            internet_facing=True,
        )

        stac_api = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "stac-api",
            cluster=cluster,
            domain_zone=route53.HostedZone.from_lookup(
                self, "HostedZone", domain_name='tessellata.net'),
            domain_name=
            'api.tessellata.net',  # NOTE: this creates the route 53 record!!
            certificate=certs.Certificate.from_certificate_arn(
                self,
                'domain-cert',
                certificate_arn=
                'arn:aws:acm:us-east-1:138863487738:certificate/2832b798-d241-4d89-8b11-bdc9a377f173'
            ),
            load_balancer=lb,
            public_load_balancer=True,
            protocol=elb.ApplicationProtocol.HTTPS,
            redirect_http=True,
            cpu=1024,
            memory_limit_mib=2048,
            desired_count=2,
            # cloud_map_options=ecs.CloudMapOptions(
            #     cloud_map_namespace=namespace, name="mgmtapi"
            # ),
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("./stac-api/backend/"),
                container_port=8080,
                environment={
                    "STAC_URL":
                    "http://api.tessellata.net",  # NOTE: this record gets created dynamically.
                    "TITILER_ENDPOINT":
                    "https://titiler.tessellata.net",
                    "PORT":
                    "27017",
                    "USER":
                    os.getenv("MONGO_USER"),
                    "PASSWORD":
                    os.getenv("MONGO_PASS"),
                    "HOST":
                    "stac-monog-1ADLUZEF6X1HP-70e2bcd902084e54.elb.us-east-1.amazonaws.com",
                },
                enable_logging=True,
            ),
        )

        mongodb = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            'monogdb-service',
            cpu=2048,
            memory_limit_mib=4096,
            # task_definition=,
            task_image_options=ecs_patterns.
            NetworkLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry('mongo'),
                container_port=27017,
                environment={
                    "MONGO_INITDB_ROOT_USERNAME":
                    os.getenv("MONGO_USER"),  # TODO make more secure.
                    "MONGO_INITDB_ROOT_PASSWORD":
                    os.getenv("MONGO_PASS")  # TODO make more secure.
                },
                enable_logging=True,
            ),
            cluster=cluster,
            desired_count=1,
            listener_port=27017,
        )
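        # Editor's note: the root credentials above are read from the shell
        # at synth time and end up in plain text in the task definition, and
        # public_load_balancer defaults to True, so the NLB is
        # internet-facing. A hedged sketch, assuming aws_secretsmanager is
        # imported as `secretsmanager` and a secret name of our choosing:
        mongo_secret = secretsmanager.Secret(
            self, "MongoSecret", secret_name="covar-mongo-root")
        # ...then, inside NetworkLoadBalancedTaskImageOptions:
        #     secrets={"MONGO_INITDB_ROOT_PASSWORD":
        #              ecs.Secret.from_secrets_manager(mongo_secret)},
        # and pass public_load_balancer=False to keep the database internal.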
        # cluster = ecs.Cluster(self, f"{COMPONENT_PREFIX}", vpc=vpc)

        # Test receiver (server)
        nlb_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            f"{COMPONENT_PREFIX}Service",
            # cluster=cluster,
            task_image_options={
                "image":
                ecs.ContainerImage.from_asset(
                    path.join(dirname, "../../container")),
                "container_port":
                server_port,
                "enable_logging":
                True,
                "environment": {
                    "S3_BUCKET_NAME": test_server_output_bucket.bucket_name,
                    "PORT_NUMBER": str(server_port),
                },
                "container_name":
                "hl7server",
            },
            desired_count=1,
            listener_port=server_port,
            public_load_balancer=False,
            vpc=vpc,
        )
        service = nlb_service.service
        connections = service.connections
        connections.allow_from(
            ec2.Peer.ipv4(vpc.vpc_cidr_block),
            # Completion: the original snippet was truncated here; the server
            # port matches the listener_port configured above.
            ec2.Port.tcp(server_port))
Beispiel #22
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create a VPC
        # NOTE: Limit AZs to avoid reaching resource quotas
        vpc = ec2.Vpc(self, "WebVpc", max_azs=2, nat_gateways=1)

        # # FOR INCREASED SECURITY
        # # Create a VPC endpoint for DynamoDB and associate a policy
        # dynamodb_endpoint = vpc.add_gateway_endpoint(
        #     'DynamoDbEndpoint',
        #     service=ec2.GatewayVpcEndpointAwsService.DYNAMODB,
        #     subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)]
        # )
        #
        # dynamodb_policy = _iam.PolicyStatement()
        # dynamodb_policy.add_any_principal()
        # dynamodb_policy.add_actions('*')
        # dynamodb_policy.add_all_resources()
        # dynamodb_endpoint.add_to_policy(dynamodb_policy)

        ecs_task_inline_policy = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Sid":
                "AllActionsOnTable",
                "Action": [
                    "dynamodb:*",
                ],
                "Effect":
                "Allow",
                "Resource": [
                    "arn:aws:dynamodb:" + self.region + ":" + self.account +
                    ":table/MysfitsTable", "arn:aws:dynamodb:" + self.region +
                    ":" + self.account + ":table/MysfitsTable/index/*"
                ]
            }]
        }

        # Create a Fargate Cluster
        cluster = ecs.Cluster(self, 'WebEc2Cluster', vpc=vpc)

        ecs_access_role = _iam.Role.from_role_arn(scope=self,
                                                  id="ECSAccessRole",
                                                  role_arn="arn:aws:iam::" +
                                                  self.account +
                                                  ":role/ecsTaskExecutionRole",
                                                  mutable=False)

        # NOTE: this reuses the same ecsTaskExecutionRole ARN as the task
        # role; mutable=True so the inline policy below can be attached to it.
        ecs_task_role = _iam.Role.from_role_arn(scope=self,
                                                id="ECSTaskRole",
                                                role_arn="arn:aws:iam::" +
                                                self.account +
                                                ":role/ecsTaskExecutionRole",
                                                mutable=True)

        # # A way to add the policy
        # ecs_task_role.add_to_policy(_iam.PolicyStatement(
        #     actions=["dynamodb:*"],
        #     resources=["arn:aws:dynamodb:" + self.region + ":" + self.account + ":table/MysfitsTable"],
        #     effect=_iam.Effect.ALLOW
        # ))

        # Alternative way to add the policy
        ecs_task_inline_policy_document = _iam.PolicyDocument.from_json(
            ecs_task_inline_policy)
        ecs_task_policy = _iam.Policy(self,
                                      "ECSTaskPolicy",
                                      document=ecs_task_inline_policy_document)
        ecs_task_role.attach_inline_policy(ecs_task_policy)

        task_image_options = ecs_patterns.NetworkLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry(
                self.account + ".dkr.ecr." + self.region +
                ".amazonaws.com/mythicalmysfits/webservice"),
            container_port=8080,
            execution_role=ecs_access_role,
            enable_logging=True,
            task_role=ecs_task_role)

        self.fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "MythicalMysfits-Service",
            cluster=cluster,
            cpu=256,
            memory_limit_mib=512,
            public_load_balancer=True,
            task_image_options=task_image_options)

        self.fargate_service.service.connections.security_groups[
            0].add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(8080),
                                description="Allow http inbound from VPC")

        core.CfnOutput(
            self,
            "WebLoadBalancerDNS",
            value=self.fargate_service.load_balancer.load_balancer_dns_name)
        # self.lb_dns_name = core.CfnOutput(
        #     self, "WebLoadBalancerDNS",
        #     value=self.fargate_service.load_balancer.load_balancer_dns_name,
        #     export_name="lb-dns-name"
        # ).import_value

        core.CfnOutput(
            self,
            "WebLoadBalancerARN",
            value=self.fargate_service.load_balancer.load_balancer_arn)
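        # Editor's note: the commented-out block above shows exporting the
        # DNS name. A minimal sketch of the consuming side, assuming a second
        # stack in the same account/region and the export_name "lb-dns-name"
        # from that block:
        lb_dns_name = core.Fn.import_value("lb-dns-name")
        core.CfnOutput(self, "ImportedWebLbDns", value=lb_dns_name)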