Example #1
    def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC
        vpc = ec2.Vpc(self, "TheVPC",
                      cidr="10.0.0.0/16",
                      nat_gateways=1,
                      )

        # IAM roles
        service_task_def_exe_role = iam.Role(
            self, "ServiceTaskDefExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
            )

        service_task_def_exe_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))

        service_task_def_role = iam.Role(
            self, 'ServiceTaskDefTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )
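        # Note: the execution role above is used by ECS itself to pull the image from ECR
        # and write logs; the task role is what the application containers assume at runtime.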

        # Fargate cluster
        cluster = ecs.Cluster(
            scope=self,
            id="ecs-cluster",
            cluster_name=config.ProjectName + "-" + config.stage,
            vpc=vpc
        )

        load_balancer = elbv2.ApplicationLoadBalancer(
            self, "load_balancer",
            vpc=vpc,
            internet_facing=True
        )

        # Security Group
        service_sg = ec2.SecurityGroup(self, "service_sg", vpc=vpc)
        service_sg.connections.allow_from(load_balancer, ec2.Port.tcp(80))

        # ECR Repo
        image_repo = ecr.Repository.from_repository_name(self, "image_repo",
                                                         repository_name=config.ProjectName
                                                         )

        log_group = logs.LogGroup(self, "log_group",
                                  log_group_name=config.ProjectName + "-" + config.stage,
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  retention=None
                                  )
        # ECS Task Def
        fargate_task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id="fargate_task_definition",
            cpu=1024,
            memory_limit_mib=2048,
            execution_role=service_task_def_exe_role,
            task_role=service_task_def_role,
            family=config.ProjectName + "-" + config.stage
        )

        container = fargate_task_definition.add_container(
            id="fargate_task_container",
            image=ecs.ContainerImage.from_ecr_repository(repository=image_repo, tag='release')
        )

        container.add_port_mappings(ecs.PortMapping(container_port=8080, host_port=8080, protocol=ecs.Protocol.TCP))

        # ECS Fargate Service
        fargate_service = ecs.FargateService(
            scope=self,
            id="fargate_service",
            security_group=service_sg,
            cluster=cluster,
            desired_count=2,
            deployment_controller=ecs.DeploymentController(type=ecs.DeploymentControllerType.ECS),
            task_definition=fargate_task_definition,
            service_name=config.ProjectName + "-" + config.stage
        )

        # Main Env
        listener_health_check_main = elbv2.HealthCheck(
            healthy_http_codes='200',
            interval=core.Duration.seconds(5),
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
            timeout=core.Duration.seconds(4)
        )
        # Test Env
        listener_health_check_test = elbv2.HealthCheck(
            healthy_http_codes='200',
            interval=core.Duration.seconds(5),
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
            timeout=core.Duration.seconds(4)
        )
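        # Note: listener_health_check_test is defined for a test-environment listener
        # but is not attached to any listener in this snippet.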

        listener_main = load_balancer.add_listener("load_balancer_listener_1",
                                                   port=80,
                                                   )

        listener_main_targets = listener_main.add_targets("load_balancer_target_1", port=8080,
                                                          health_check=listener_health_check_main,
                                                          targets=[fargate_service]
                                                          )

        # Alarms: monitor 500s on target group
        aws_cloudwatch.Alarm(self, "TargetGroup5xx",
                             metric=listener_main_targets.metric_http_code_target(elbv2.HttpCodeTarget.TARGET_5XX_COUNT),
                             threshold=1,
                             evaluation_periods=1,
                             period=core.Duration.minutes(1)
                             )

        # Alarms: monitor unhealthy hosts on target group
        aws_cloudwatch.Alarm(self, "TargetGroupUnhealthyHosts",
                             metric=listener_main_targets.metric('UnHealthyHostCount'),
                             threshold=1,
                             evaluation_periods=1,
                             period=core.Duration.minutes(1)
                             )

        core.CfnOutput(self, "lburl",
                       value=load_balancer.load_balancer_dns_name,
                       export_name="LoadBalancerUrl"
                       )
Example #2
from aws_cdk import (aws_autoscaling as autoscaling, aws_ec2 as ec2,
                     aws_elasticloadbalancingv2 as elbv2, aws_ecs as ecs, App,
                     CfnOutput, Duration, Stack)

app = App()
stack = Stack(app, "aws-ec2-integ-ecs")

# Create a cluster
vpc = ec2.Vpc(stack, "MyVpc", max_azs=2)

cluster = ecs.Cluster(stack, 'EcsCluster', vpc=vpc)

asg = autoscaling.AutoScalingGroup(
    stack,
    "DefaultAutoScalingGroup",
    instance_type=ec2.InstanceType.of(ec2.InstanceClass.STANDARD5,
                                      ec2.InstanceSize.MICRO),
    machine_image=ecs.EcsOptimizedImage.amazon_linux2(),
    vpc=vpc,
)
capacity_provider = ecs.AsgCapacityProvider(stack,
                                            "AsgCapacityProvider",
                                            auto_scaling_group=asg)
cluster.add_asg_capacity_provider(capacity_provider)

# Create Task Definition
task_definition = ecs.Ec2TaskDefinition(stack, "TaskDef")
container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
    memory_limit_mib=256)
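
# The original snippet ends at the container definition. A minimal, hypothetical
# continuation (an assumption, not part of the source example) would publish a port
# and run the task as an EC2-backed service:
container.add_port_mappings(
    ecs.PortMapping(container_port=80, host_port=8080, protocol=ecs.Protocol.TCP))

service = ecs.Ec2Service(stack, "Service", cluster=cluster, task_definition=task_definition)

CfnOutput(stack, "ServiceName", value=service.service_name)

app.synth()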
Example #3
    def __init__(self, scope: core.Construct, id: str, *, app_env: str, vpc: ec2.Vpc, rds: rds.CfnDBInstance,
                 repository: ecr.Repository, log_group: logs.LogGroup, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster = ecs.Cluster(self, 'staff-cluster', cluster_name=f'staff-cluster-{app_env}', vpc=vpc)

        # task definition
        task_definition = ecs.FargateTaskDefinition(self, f'staff-task-def-{app_env}', cpu=512, memory_limit_mib=1024)
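        # NOTE: "_" in the lookups below is presumably aws_ssm.StringParameter imported
        # under that alias at module level; each call references an existing SSM parameter by name.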
        admin_url = _.from_string_parameter_name(self, "admin_url", f'/{app_env}/ssm/ADMIN_URL')
        db_name = _.from_string_parameter_name(self, "database_name", f'/{app_env}/ssm/DATABASE_NAME')
        test_db_name = _.from_string_parameter_name(self, "test_database_name", f'/{app_env}/ssm/TEST_DATABASE_NAME')
        db_user = _.from_string_parameter_name(self, "database_user", f'/{app_env}/ssm/DATABASE_USER')
        db_pass = _.from_string_parameter_name(self, "database_pass", f'/{app_env}/ssm/DATABASE_PASSWORD')
        db_port = _.from_string_parameter_name(self, "database_port", f'/{app_env}/ssm/DATABASE_PORT')
        email_host_user = _.from_string_parameter_name(self, "email_host_user", f'/{app_env}/ssm/EMAIL_HOST_USER')
        email_host_pass = _.from_string_parameter_name(self, "email_host_pass", f'/{app_env}/ssm/EMAIL_HOST_PASSWORD')
        broker_url = _.from_string_parameter_name(self, "celery_broker_url", f'/{app_env}/ssm/CELERY_BROKER_URL')
        backend = _.from_string_parameter_name(self, "celery_result_backend", f'/{app_env}/ssm/CELERY_RESULT_BACKEND')
        log_level = _.from_string_parameter_name(self, "celery_log_level", f'/{app_env}/ssm/CELERY_LOG_LEVEL')
        gen_queue = _.from_string_parameter_name(self, "celery_general_queue", f'/{app_env}/ssm/CELERY_GENERAL_QUEUE')
        report_queue = _.from_string_parameter_name(self, "celery_report_queue", f'/{app_env}/ssm/CELERY_REPORT_QUEUE')
        system_email = _.from_string_parameter_name(self, "system_email", f'/{app_env}/ssm/SYSTEM_EMAIL')
        system_password = _.from_string_parameter_name(self, "system_password", f'/{app_env}/ssm/SYSTEM_PASSWORD')

        task_definition_params = {
            'image': ecs.ContainerImage.from_ecr_repository(repository, tag=app_env),
            'environment': {
                    'ENV': f'{app_env}',
                    'PROJECT': 'ssm',
                    'PYTHONPATH': '/var/staff/ssm/',
                    'CORS_ORIGIN_ALLOW_ALL': 'True',
                    'DJANGO_SETTINGS_MODULE': 'ssm.settings',
                    'DATABASE_HOST': rds.attr_endpoint_address,
             },
            'secrets': {
                    'ADMIN_URL': ecs.Secret.from_ssm_parameter(admin_url),
                    'DATABASE_NAME': ecs.Secret.from_ssm_parameter(db_name),
                    'TEST_DATABASE_NAME': ecs.Secret.from_ssm_parameter(test_db_name),
                    'DATABASE_USER': ecs.Secret.from_ssm_parameter(db_user),
                    'DATABASE_PASSWORD': ecs.Secret.from_ssm_parameter(db_pass),
                    'DATABASE_PORT': ecs.Secret.from_ssm_parameter(db_port),
                    'EMAIL_HOST_USER': ecs.Secret.from_ssm_parameter(email_host_user),
                    'EMAIL_HOST_PASSWORD': ecs.Secret.from_ssm_parameter(email_host_pass),
                    'CELERY_BROKER_URL': ecs.Secret.from_ssm_parameter(broker_url),
                    'CELERY_RESULT_BACKEND': ecs.Secret.from_ssm_parameter(backend),
                    'CELERY_LOG_LEVEL': ecs.Secret.from_ssm_parameter(log_level),
                    'CELERY_GENERAL_QUEUE': ecs.Secret.from_ssm_parameter(gen_queue),
                    'CELERY_REPORT_QUEUE': ecs.Secret.from_ssm_parameter(report_queue),
                    'SYSTEM_EMAIL': ecs.Secret.from_ssm_parameter(system_email),
                    'SYSTEM_PASSWORD': ecs.Secret.from_ssm_parameter(system_password),
             },
            'logging': ecs.AwsLogDriver(stream_prefix='ecs-', log_group=log_group)
        }
        container = task_definition.add_container("web", **task_definition_params)
        container.add_port_mappings(ecs.PortMapping(container_port=80))

        # fargate service
        service_sg_params = {
            'security_group_name': f'staff-fargate-service-security-group-{app_env}',
            'description': f'staff fargate service security group for {app_env}',
            'vpc': vpc,
        }
        service_sc = ec2.SecurityGroup(self, 'staff-service-security-group', **service_sg_params)
        service_sc.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), 'allow 80 port access from the world')
        service_sc.add_egress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), '')
        service_params = {
            'service_name': 'staff-fargate-service',
            'cluster': cluster,
            'task_definition': task_definition,
            'desired_count': 1,
            'assign_public_ip': True,
            'security_group': service_sc,
            'vpc_subnets': ec2.SubnetSelection(subnets=vpc.public_subnets),
        }
        service = ecs.FargateService(self, 'staff-fargate-service', **service_params)

        # application load balancer
        balancer_sg_params = {
            'security_group_name': f'staff-balancer-security-group-{app_env}',
            'description': f'staff balancer security group for {app_env}',
            'vpc': vpc,
        }
        balancer_sc = ec2.SecurityGroup(self, 'staff-balancer-security-group', **balancer_sg_params)
        balancer_sc.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), 'allow 80 port access from the world')
        balancer_sc.add_egress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), '')

        balancer_params = {
            'http2_enabled': True,
            'ip_address_type': elbv2.IpAddressType.IPV4,
            'security_group': balancer_sc,
            'vpc': vpc,
            'deletion_protection': False,
            'internet_facing': True,
            'load_balancer_name': f'staff-application-balancer-{app_env}',
            'vpc_subnets': ec2.SubnetSelection(subnets=vpc.public_subnets),
        }
        balancer = elbv2.ApplicationLoadBalancer(self, 'staff-application-balancer', **balancer_params)
        listener = balancer.add_listener('staff-listener', open=True, port=80)
        listener.add_targets('staff-target-group', port=80, targets=[service.load_balancer_target(
            container_name='web', container_port=80,
        )])
Example #4
driveropts={
    "type": "nfs",
    "device":device_set,
    "o": efs_to_connect
    #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"

}

docker_vol_config=ecs.DockerVolumeConfiguration(driver='local', scope=ecs.Scope.TASK, driver_opts=driveropts, labels=None)
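# The volume configuration above is task-scoped and uses the local driver with NFS options,
# so containers in the task can mount the EFS share referenced by device_set / efs_to_connect.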

docker_volume=ecs.Volume(name='docker_vol',docker_volume_configuration=docker_vol_config)

efs_mount=ecs.MountPoint(container_path='/efs',read_only=True, source_volume='docker_vol')

cluster = ecs.Cluster(
    stack, "wes-onetest-ecs",
    vpc=vpc
)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("c5.xlarge"), key_name='aws-eb',max_capacity=4,machine_image=amitouse,
                     desired_capacity=2,min_capacity=2)

# Create a task definition with its own elastic network interface

task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-vistaweb",
    network_mode=ecs.NetworkMode.AWS_VPC,
    volumes=[docker_volume]
)
Example #5
    def __init__(self, scope: core.Construct, construct_id: str,
                 properties: WordpressStackProperties, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        database = rds.ServerlessCluster(
            self,
            "WordpressServerless",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            default_database_name="WordpressDatabase",
            vpc=properties.vpc,
            scaling=rds.ServerlessScalingOptions(
                auto_pause=core.Duration.seconds(0)),
            deletion_protection=False,
            backup_retention=core.Duration.days(7),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        file_system = efs.FileSystem(
            self,
            "WebRoot",
            vpc=properties.vpc,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
        )

        # docker context directory
        docker_context_path = os.path.join(os.path.dirname(__file__), "../../src")

        # upload images to ecr
        nginx_image = ecr_assets.DockerImageAsset(
            self,
            "Nginx",
            directory=docker_context_path,
            file="Docker.nginx",
        )

        wordpress_image = ecr_assets.DockerImageAsset(
            self,
            "Php",
            directory=docker_context_path,
            file="Docker.wordpress",
        )

        cluster = ecs.Cluster(self,
                              'ComputeResourceProvider',
                              vpc=properties.vpc)

        wordpress_volume = ecs.Volume(
            name="WebRoot",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id))

        event_task = ecs.FargateTaskDefinition(self,
                                               "WordpressTask",
                                               volumes=[wordpress_volume])

        #
        # webserver
        #
        nginx_container = event_task.add_container(
            "Nginx",
            image=ecs.ContainerImage.from_docker_image_asset(nginx_image))

        nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

        nginx_container_volume_mount_point = ecs.MountPoint(
            read_only=True,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        nginx_container.add_mount_points(nginx_container_volume_mount_point)

        #
        # application server
        #
        app_container = event_task.add_container(
            "Php",
            environment={
                'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
                'WORDPRESS_TABLE_PREFIX': 'wp_'
            },
            secrets={
                'WORDPRESS_DB_USER':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="username"),
                'WORDPRESS_DB_PASSWORD':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="password"),
                'WORDPRESS_DB_NAME':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="dbname"),
            },
            image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
        app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

        container_volume_mount_point = ecs.MountPoint(
            read_only=False,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        app_container.add_mount_points(container_volume_mount_point)

        #
        # create service
        #
        wordpress_service = ecs.FargateService(
            self,
            "InternalService",
            task_definition=event_task,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            cluster=cluster,
        )

        #
        # scaling
        #
        scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                          max_capacity=50)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=85,
            scale_in_cooldown=core.Duration.seconds(120),
            scale_out_cooldown=core.Duration.seconds(30),
        )

        #
        # network acl
        #
        database.connections.allow_default_port_from(wordpress_service,
                                                     "wordpress access to db")
        file_system.connections.allow_default_port_from(wordpress_service)

        #
        # external access
        #
        wordpress_service.connections.allow_from(
            other=properties.load_balancer, port_range=ec2.Port.tcp(80))

        http_listener = properties.load_balancer.add_listener(
            "HttpListener",
            port=80,
        )

        http_listener.add_targets(
            "HttpServiceTarget",
            protocol=elbv2.ApplicationProtocol.HTTP,
            targets=[wordpress_service],
            health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ******* Database table
        audiobooksDB = aws_dynamodb.Table(
            self,
            "audiobooksDB",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING),
            read_capacity=2,
            write_capacity=2,
            billing_mode=aws_dynamodb.BillingMode.PROVISIONED)

        # ******* Lambda functions
        book_upload_lambda_function = aws_lambda.Function(
            self,
            "HandleBookUploadLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_book_upload'))
        polly_audio_lambda_function = aws_lambda.Function(
            self,
            "HandlePollyAudioLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_polly_audio'),
            timeout=core.Duration.seconds(120))

        # ******* S3 upload buckets
        BookUploadBucket = aws_s3.Bucket(self, "BookUploadBucket")
        AudioUploadBucket = aws_s3.Bucket(self, "AudioUploadBucket")
        VideoUploadBucket = aws_s3.Bucket(self, "VideoUploadBucket")
        ImageUploadBucket = aws_s3.Bucket(self, "ImageUploadBucket")

        # ******* Create S3 event source
        book_upload_lambda_function.add_event_source(
            S3EventSource(BookUploadBucket,
                          events=[aws_s3.EventType.OBJECT_CREATED],
                          filters=[{
                              "suffix": '.txt'
                          }]))
        # ******* Create SNS topic
        PollySNSTopic = aws_sns.Topic(self, "PollySNSTopic")
        PollySNSTopic.add_subscription(
            aws_sns_subscriptions.LambdaSubscription(
                polly_audio_lambda_function))

        # ******* Book function environment variables
        book_upload_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        book_upload_lambda_function.add_environment(
            "AUDIO_S3_BUCKET", AudioUploadBucket.bucket_name)
        book_upload_lambda_function.add_environment("SNS_TOPIC",
                                                    PollySNSTopic.topic_arn)

        # ******* Book function permissions
        audiobooksDB.grant_write_data(book_upload_lambda_function)
        BookUploadBucket.grant_read(book_upload_lambda_function)
        AudioUploadBucket.grant_write(book_upload_lambda_function)
        PollySNSTopic.grant_publish(book_upload_lambda_function)
        book_upload_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["polly:*"], resources=["*"]))

        # ******* Fargate container permissions
        role = aws_iam.Role(
            self,
            "FargateContainerRole",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:PutObject"],
                resources=[VideoUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[AudioUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[ImageUploadBucket.bucket_arn + "/*"]))

        # ******* Fargate container
        vpc = aws_ec2.Vpc(self, "CdkFargateVpc", max_azs=2)
        cluster = aws_ecs.Cluster(self, 'FargateCluster', vpc=vpc)
        image = aws_ecs.ContainerImage.from_asset(
            "../Functions/ECSContainerFiles")
        task_definition = aws_ecs.FargateTaskDefinition(
            self,
            "FargateContainerTaskDefinition",
            execution_role=role,
            task_role=role,
            cpu=1024,
            memory_limit_mib=3072)

        port_mapping = aws_ecs.PortMapping(container_port=80, host_port=80)
        container = task_definition.add_container(
            "Container",
            image=image,
            logging=aws_ecs.AwsLogDriver(
                stream_prefix="videoProcessingContainer"))
        container.add_port_mappings(port_mapping)
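        # The cluster, task definition and container name below are handed to the Polly
        # audio Lambda via environment variables, and that Lambda is granted ecs:RunTask
        # so it can launch this container on demand.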

        # ******* Audio function environment variables
        polly_audio_lambda_function.add_environment(
            "VIDEO_S3_BUCKET", VideoUploadBucket.bucket_name)
        polly_audio_lambda_function.add_environment(
            "TASK_DEFINITION_ARN", task_definition.task_definition_arn)
        polly_audio_lambda_function.add_environment("CLUSTER_ARN",
                                                    cluster.cluster_arn)
        polly_audio_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        polly_audio_lambda_function.add_environment("CONTAINER_NAME",
                                                    container.container_name)
        polly_audio_lambda_function.add_environment("VPC_ID", str(vpc.vpc_id))

        # ******* Audio function permissions
        audiobooksDB.grant_read_write_data(polly_audio_lambda_function)
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ecs:RunTask"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["iam:PassRole"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ec2:DescribeSubnets"],
                                    resources=["*"]))
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # API Gateway needs to have resource policy granting FHIR Works on AWS lambda
        # execute permissions. Lambda function ARN will be passed during deployment as CDK context variable
        # FHIR Works lambda will need to have policy attached to its execution role
        # allowing it to invoke API
        # From --context resource-router-lambda-role="arn:aws:iam::123456789012:role/rolename"
        imported_resource_router_lambda_role = self.node.try_get_context(
            "resource-router-lambda-role"
        )
        # Amazon ECS on AWS Fargate container implementing connection manager
        # will be launched into a VPC that needs to have private and public subnets
        # and NAT gateway or instance
        # From --context vpc-id="vpc-123456"
        vpc_id = self.node.try_get_context("vpc-id")

        # The following parameters specify name of the HL7 server
        # that will be receiving transformed HL7v2 messages and TCP port
        # that it will be listening on
        # From --context hl7-server-name="hl7.example.com"
        # From --context hl7-port="2575"
        hl7_server_name = self.node.try_get_context("hl7-server-name")
        hl7_port = self.node.try_get_context("hl7-port")

        # In this proof of concept source of data for read interactions
        # is S3 bucket where mock HL7 server stores processed HL7 messages
        # From --context test-server-output-bucket-name="DOC-EXAMPLE-BUCKET"
        test_server_output_bucket_name = self.node.try_get_context(
            "test-server-output-bucket-name"
        )

        # SQS queue
        # Custom transform lambda communicates with Connectivity Manager using this SQS queue
        queue = sqs.Queue(
            self, f"{COMPONENT_PREFIX}Queue", encryption=sqs.QueueEncryption.KMS_MANAGED
        )

        # S3 Bucket to retrieve HL7v2 messages in proof of concept deployment
        test_server_output_bucket = s3.Bucket.from_bucket_name(
            self, f"{COMPONENT_PREFIX}OutputBucket", test_server_output_bucket_name
        )

        # Transform Lambda
        # Reference implementation of Custom Transform component of Transform Execution Environment

        transform_lambda = lambda_.Function(
            self,
            f"{COMPONENT_PREFIX}TransformLambda",
            handler="transform.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                path.join(dirname, "../../lambda"),
                bundling={
                    "image": lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    "command": [
                        "bash",
                        "-c",
                        " && ".join(
                            [
                                "pip install --no-cache-dir -r requirements.txt -t /asset-output",
                                "(tar -c --exclude-from=exclude.lst -f - .)|(cd /asset-output; tar -xf -)",
                            ]
                        ),
                    ],
                },
            ),
            timeout=core.Duration.seconds(60),
            environment=dict(
                SQS_QUEUE=queue.queue_url,
                # The following parameter is optional
                S3_BUCKET_NAME=test_server_output_bucket_name,
            ),
        )
        queue.grant_send_messages(transform_lambda)

        # API Gateway with Lambda construct (using https://aws.amazon.com/solutions/constructs/patterns)
        # Reference implementation of Custom Transform component of Transform Execution Environment

        api_lambda = apigw_lambda.ApiGatewayToLambda(
            self,
            "ApiGw",
            existing_lambda_obj=transform_lambda,
            api_gateway_props=apigw.LambdaRestApiProps(
                handler=transform_lambda,
                proxy=False,
                rest_api_name=f"{COMPONENT_PREFIX_DASHES}-api",
                endpoint_export_name=f"{COMPONENT_PREFIX}ApiEndPoint",
                description=f"{COMPONENT_PREFIX} APIGW with Transform Lambda (FHIR to HL7v2)",
                default_method_options=apigw.MethodOptions(
                    authorization_type=apigw.AuthorizationType.IAM,
                ),
                policy=iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            actions=["execute-api:Invoke"],
                            effect=iam.Effect.ALLOW,
                            principals=[
                                iam.ArnPrincipal(imported_resource_router_lambda_role),
                            ],
                            resources=["execute-api:/*/*/*"],
                        )
                    ]
                ),
            ),
        )
        rest_api = api_lambda.api_gateway
        persistence = rest_api.root.add_resource("persistence")
        resource_type = persistence.add_resource("{resource_type}")
        resource_type.add_method("POST")
        resource_id = resource_type.add_resource("{id}")
        resource_id.add_method("GET")
        resource_id.add_method("PUT")
        resource_id.add_method("DELETE")

        # ECS Fargate Container (HL7v2 sender)
        # This container implements Connectivity Manager component
        # of Transform Execution Environment

        vpc = ec2.Vpc.from_lookup(self, "DefaultVpc", vpc_id=vpc_id)

        cluster = ecs.Cluster(self, f"{COMPONENT_PREFIX}Cluster", vpc=vpc)

        ecs_patterns.QueueProcessingFargateService(
            self,
            f"{COMPONENT_PREFIX}Service",
            cluster=cluster,
            image=ecs.ContainerImage.from_asset(path.join(dirname, "../../container")),
            queue=queue,
            desired_task_count=1,
            log_driver=ecs.LogDriver.aws_logs(
                stream_prefix=f"{COMPONENT_PREFIX}HL7Client",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
            environment=dict(
                SERVER_NAME=hl7_server_name,
                PORT_NUMBER=hl7_port,
            ),
        )

        # The following permission grants are needed to support
        # read interactions with integration transform
        test_server_output_bucket.grant_read(transform_lambda)

        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.bucket_arn],
            )
        )
        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.arn_for_objects("*")],
            )
        )

        # CloudFormation Stack outputs
        # The following outputs needed to configure FHIR Works on AWS API interface
        core.CfnOutput(
            self,
            "TransformApiRootUrl",
            value=rest_api.url,
            export_name="TransformApiRootUrl",
        )
        core.CfnOutput(
            self,
            "TransformApiRegion",
            value=self.region,
            export_name="TransformApiRegion",
        )
        core.CfnOutput(
            self,
            "TransformApiAccountId",
            value=self.account,
            export_name="TransformApiAccountId",
        )
Example #8
    def __init__(self, scope, id, **kwargs):
        super().__init__(scope, id, **kwargs)

        # Create random string to be used as suffix on some resource names
        resource_suffix = ''.join(
            random.choice(string.ascii_lowercase) for i in range(8))

        # Save it as SSM parameter to be used in runtime
        ssm.StringParameter(self,
                            "RESOURCE_SUFFIX",
                            string_value=resource_suffix,
                            parameter_name="RESOURCE_SUFFIX")

        # ====================================== VPC ======================================
        # Create VPC
        vpc = ec2.Vpc(self,
                      "sorterbot-vpc",
                      cidr="10.0.0.0/16",
                      enable_dns_support=True,
                      enable_dns_hostnames=True,
                      max_azs=2,
                      nat_gateways=0,
                      subnet_configuration=[
                          {
                              "subnetType": ec2.SubnetType.PUBLIC,
                              "name": "sorterbot-public-subnet-a",
                              "cidrMask": 24,
                          },
                          {
                              "subnetType": ec2.SubnetType.PUBLIC,
                              "name": "sorterbot-public-subnet-b",
                              "cidrMask": 24,
                          },
                      ])

        # Create security groups
        sg_vpc = ec2.SecurityGroup(self,
                                   "sorterbot-vpc-sg",
                                   vpc=vpc,
                                   allow_all_outbound=True,
                                   security_group_name="sorterbot-vpc-sg")
        sg_vpc.add_ingress_rule(sg_vpc, ec2.Port.all_traffic())
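        # Self-referencing rule: allow all traffic between members of sorterbot-vpc-sg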

        sg_control = ec2.SecurityGroup(
            self,
            "sorterbot-control-sg",
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name="sorterbot-control-sg")
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22))
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432))
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

        # ====================================== IAM ======================================
        cloud_role = iam.CfnRole(
            self,
            "SorterBotCloudRole",
            role_name="SorterBotCloudRole",
            assume_role_policy_document={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Sid": "",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "ecs-tasks.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }]
            },
            managed_policy_arns=[
                "arn:aws:iam::aws:policy/AmazonS3FullAccess",
                "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
            ])

        # Create IAM policies
        iam.ManagedPolicy(self,
                          "SorterBotSecretsForECSPolicy",
                          managed_policy_name="SorterBotSecretsForECSPolicy",
                          roles=[cloud_role],
                          statements=[
                              iam.PolicyStatement(
                                  resources=["*"],
                                  actions=[
                                      "ssm:GetParameter", "ssm:GetParameters",
                                      "secretsmanager:GetSecretValue",
                                      "kms:Decrypt"
                                  ])
                          ])

        # ====================================== S3 ======================================
        # Create S3 buckets
        s3.Bucket(self,
                  f"sorterbot-{resource_suffix}",
                  bucket_name=f"sorterbot-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY)
        s3.Bucket(self,
                  f"sorterbot-weights-{resource_suffix}",
                  bucket_name=f"sorterbot-weights-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY)
        s3.Bucket(self,
                  f"sorterbot-static-{resource_suffix}",
                  bucket_name=f"sorterbot-static-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY,
                  cors=[
                      s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                                  allowed_origins=["*"],
                                  allowed_headers=["*"])
                  ])

        # ====================================== EC2 ======================================
        # Create EC2 instance for Control Panel
        control_panel_instance = ec2.Instance(
            self,
            f"sorterbot-control-panel-{resource_suffix}",
            instance_name=
            f"sorterbot-control-panel-{resource_suffix}",  # Since deleted instances stay around for a while in terminated state, random suffix is needed to prevent errors when destroying stack # noqa: E501
            instance_type=ec2.InstanceType("t2.micro"),
            machine_image=ec2.MachineImage.latest_amazon_linux(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc,
            key_name="sorterbot",
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=sg_control)

        control_panel_instance.add_to_role_policy(
            iam.PolicyStatement(resources=["*"],
                                actions=[
                                    "ec2:DescribeNetworkInterfaces",
                                    "ssm:GetParameter", "ecs:*", "s3:*"
                                ]))

        # ====================================== RDS ======================================
        # Declare connection details
        master_username = "******"
        master_user_password = core.SecretValue.ssm_secure("PG_PASS",
                                                           version="1")
        port = 5432

        # Create postgres database
        database = rds.DatabaseInstance(
            self,
            "sorterbot_postgres",
            allocated_storage=10,
            backup_retention=core.Duration.days(
                0
            ),  # Don't save backups since storing them is not covered by the Free Tier
            database_name="sorterbot",
            delete_automated_backups=True,
            deletion_protection=False,
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            engine_version="11",
            instance_class=ec2.InstanceType("t2.micro"),  # Stay in Free Tier
            instance_identifier="sorterbot-postgres",
            master_username=master_username,
            master_user_password=master_user_password,
            port=port,
            storage_type=rds.StorageType.GP2,
            vpc=vpc,
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC
            ),  # Make DB publicly accessible (with credentials)
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add ingress rule to allow external connections
        database.connections.allow_default_port_from_any_ipv4()

        # ====================================== ECR ======================================
        # Create ECR repository for Docker images
        ecr.Repository(self,
                       "sorterbot-ecr",
                       repository_name="sorterbot-ecr",
                       removal_policy=core.RemovalPolicy.DESTROY)

        # ====================================== ECS ======================================
        # Create ECS Cluster, Task Definition and Fargate Service
        ecs_cluster = ecs.Cluster(self,
                                  "sorterbot-ecs-cluster",
                                  vpc=vpc,
                                  cluster_name="sorterbot-ecs-cluster")
        task_definition = ecs.FargateTaskDefinition(
            self, "sorterbot-fargate-service", cpu=512, memory_limit_mib=4096)
        task_definition.add_container(
            "sorterbot-cloud-container",
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"))
        ecs.FargateService(self,
                           "sorterbot-ecs-service",
                           cluster=ecs_cluster,
                           task_definition=task_definition,
                           assign_public_ip=True,
                           service_name="sorterbot-ecs-service",
                           desired_count=0,
                           security_group=sg_vpc)
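        # Note: the service is created with desired_count=0 and a placeholder image
        # (amazon/amazon-ecs-sample); the real image from sorterbot-ecr is presumably
        # pushed and the service scaled up outside of this stack.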

        # Save resource suffix to disk to be used when destroying
        with open(
                Path(__file__).parents[1].joinpath("scripts", "variables",
                                                   "RESOURCE_SUFFIX"),
                "w") as outfile:
            outfile.write(resource_suffix)
Example #9
##device_set=efsdns+":/"
#driveropts={
#    "type": "nfs",
#    "device":device_set,
#    "o": efs_to_connect
#    #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"

#}

#docker_vol_config=ecs.DockerVolumeConfiguration(driver='local', scope=ecs.Scope.TASK, driver_opts=driveropts, labels=None)

#docker_volume=ecs.Volume(name='docker_vol',docker_volume_configuration=docker_vol_config)

#efs_mount=ecs.MountPoint(container_path='/efs',read_only=True, source_volume='docker_vol')

cluster = ecs.Cluster(stack, "wes-ecs", vpc=vpc, cluster_name=App_Name)
print('cluster sec group ', str(type(cluster.autoscaling_group)))
#cluster.add_capacity("DefaultAutoScalingGroup",
#                     instance_type=ec2.InstanceType("c5.xlarge"), key_name='Vonc-Prod-Key',max_capacity=4,machine_image=amitouse,
#                     desired_capacity=2,min_capacity=2)

print('connections ', str(cluster.connections))
port = ec2.Port(protocol=ec2.Protocol.TCP,
                string_representation='inbound to container instances',
                from_port=22,
                to_port=22)
cluster.connections.add_security_group(app_security_group_import)
cluster.connections.allow_from_any_ipv4(port,
                                        'in bound to container instances')

# Create a task definition with its own elastic network interface
Example #10
    def __init__(self, scope: core.Construct, id: str,
                 distributed_locust: bool, target_url: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc_cidr = "10.51.0.0/16"
        number_of_slaves = 3
        ecs_instance_type = "c5.large"
        #Build new VPC
        vpc = ec2.Vpc(self,
                      "loadgenvpc",
                      cidr=vpc_cidr,
                      subnet_configuration=[{
                          "cidrMask": 24,
                          "name": "ecsvpc",
                          "subnetType": ec2.SubnetType.PUBLIC
                      }, {
                          "cidrMask":
                          24,
                          "name":
                          "ecsprivatevpc",
                          "subnetType":
                          ec2.SubnetType.PRIVATE,
                      }])

        #ECS cluster for the loadgen
        loadgen_cluster = ecs.Cluster(self, "Loadgen-Cluster", vpc=vpc)

        loadgen_cluster.add_capacity(
            "AsgSpot",
            max_capacity=2,
            min_capacity=2,
            desired_capacity=2,
            instance_type=ec2.InstanceType(ecs_instance_type),
            spot_price="0.07",
            # Enable the Automated Spot Draining support for Amazon ECS
            spot_instance_draining=True)
        #cloudmap for service discovery so slaves can lookup mast via dns
        loadgen_cluster.add_default_cloud_map_namespace(name="loadgen")

        #Create a graph widget to track reservation metrics for our cluster
        ecs_widget = cw.GraphWidget(
            left=[loadgen_cluster.metric_cpu_reservation()],
            right=[loadgen_cluster.metric_memory_reservation()],
            title="ECS - CPU and Memory Reservation",
        )

        #CloudWatch dashboard to monitor our stuff
        dashboard = cw.Dashboard(self, "Locustdashboard")
        dashboard.add_widgets(ecs_widget)

        if not distributed_locust:
            role = "standalone"
            locustContainer(self, "locust" + role, vpc, loadgen_cluster, role,
                            target_url)
        else:
            role = "master"
            master_construct = locustContainer(self, "locust" + role, vpc,
                                               loadgen_cluster, role,
                                               target_url)

            lb_widget = cw.GraphWidget(
                left=[
                    master_construct.lb.metric_active_connection_count(),
                    master_construct.lb.metric_target_response_time()
                ],
                right=[master_construct.lb.metric_request_count()],
                title="Load Balancer")

            dashboard.add_widgets(lb_widget)

            role = "slave"
            slave_construct = locustContainer(self, "locust" + role, vpc,
                                              loadgen_cluster, role,
                                              target_url, number_of_slaves)
            slave_construct.node.add_dependency(master_construct)
Example #11
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 1,
        maxcount: int = 50,
        permissions: Optional[iam.PolicyStatement] = None,
        env: dict = {},
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, f"{id}-vpc", max_azs=2)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        task_env = DEFAULT_ENV.copy()
        task_env.update(
            dict(
                MODULE_NAME="titiler.main",
                VARIABLE_NAME="app",
                WORKERS_PER_CORE="1",
                LOG_LEVEL="error",
            ))
        task_env.update(env)

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            f"{id}-service",
            cluster=cluster,
            cpu=cpu,
            memory_limit_mib=memory,
            desired_count=mincount,
            public_load_balancer=True,
            listener_port=80,
            task_image_options=dict(
                image=ecs.ContainerImage.from_asset(
                    code_dir,
                    exclude=["cdk.out", ".git"],
                    file="Dockerfiles/ecs/Dockerfile",
                ),
                container_port=80,
                environment=task_env,
            ),
        )

        if permissions:
            fargate_service.task_definition.task_role.add_to_policy(
                permissions)

        scalable_target = fargate_service.service.auto_scale_task_count(
            min_capacity=mincount, max_capacity=maxcount)

        # https://github.com/awslabs/aws-rails-provisioner/blob/263782a4250ca1820082bfb059b163a0f2130d02/lib/aws-rails-provisioner/scaling.rb#L343-L387
        scalable_target.scale_on_request_count(
            "RequestScaling",
            requests_per_target=50,
            scale_in_cooldown=core.Duration.seconds(240),
            scale_out_cooldown=core.Duration.seconds(30),
            target_group=fargate_service.target_group,
        )

        # scalable_target.scale_on_cpu_utilization(
        #     "CpuScaling", target_utilization_percent=70,
        # )

        fargate_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.ALL,
                string_representation="All port 80",
                from_port=80,
            ),
            description="Allows traffic on port 80 from NLB",
        )
Example #12
    def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ECS Cluster
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#clusters
        self.ecs_cluster = aws_ecs.Cluster(
            self, "ECSCluster", vpc=vpc, cluster_name="Fargate-microservices")

        # ECS Enable Cloud map
        self.ecs_cluster.add_default_cloud_map_namespace(name="service", )

        # ECS adding capacity
        self.asg = self.ecs_cluster.add_capacity(
            "ECSEC2Capacity",
            instance_type=aws_ec2.InstanceType(
                instance_type_identifier='t3.small'),
            min_capacity=0,
            max_capacity=10)

        core.CfnOutput(self,
                       "EC2AutoScalingGroupName",
                       value=self.asg.auto_scaling_group_name,
                       export_name="EC2ASGName")

        # Namespace details as CFN output
        self.namespace_outputs = {
            'ARN': self.ecs_cluster.default_cloud_map_namespace.private_dns_namespace_arn,
            'NAME': self.ecs_cluster.default_cloud_map_namespace.private_dns_namespace_name,
            'ID': self.ecs_cluster.default_cloud_map_namespace.private_dns_namespace_id,
        }

        # Cluster Attributes
        self.cluster_outputs = {
            'NAME': self.ecs_cluster.cluster_name,
            'SECGRPS': str(self.ecs_cluster.connections.security_groups)
        }

        # When enabling EC2, we need the security groups "registered" to the cluster for imports in other service stacks
        if self.ecs_cluster.connections.security_groups:
            self.cluster_outputs['SECGRPS'] = str([
                x.security_group_id
                for x in self.ecs_cluster.connections.security_groups
            ][0])

        # Allow inbound 80 from ALB to Frontend Service

        self.services_80_sec_group = aws_ec2.SecurityGroup(
            self,
            "FrontendToBackendSecurityGroup",
            allow_all_outbound=True,
            description=
            "Security group to allow inbound 80 from ALB to Frontend Service",
            vpc=vpc)

        self.sec_grp_ingress_self_80 = aws_ec2.CfnSecurityGroupIngress(
            self,
            "InboundSecGrp3000",
            ip_protocol='TCP',
            source_security_group_id=self.services_80_sec_group.security_group_id,
            from_port=80,
            to_port=80,
            group_id=self.services_80_sec_group.security_group_id)

        # All Outputs required for other stacks to build
        core.CfnOutput(self,
                       "NSArn",
                       value=self.namespace_outputs['ARN'],
                       export_name="NSARN")
        core.CfnOutput(self,
                       "NSName",
                       value=self.namespace_outputs['NAME'],
                       export_name="NSNAME")
        core.CfnOutput(self,
                       "NSId",
                       value=self.namespace_outputs['ID'],
                       export_name="NSID")
        core.CfnOutput(self,
                       "FE2BESecGrp",
                       value=self.services_80_sec_group.security_group_id,
                       export_name="SecGrpId")
        core.CfnOutput(self,
                       "ECSClusterName",
                       value=self.cluster_outputs['NAME'],
                       export_name="ECSClusterName")
        core.CfnOutput(self,
                       "ECSClusterSecGrp",
                       value=self.cluster_outputs['SECGRPS'],
                       export_name="ECSSecGrpList")
        core.CfnOutput(self,
                       "ServicesSecGrp",
                       value=self.services_80_sec_group.security_group_id,
                       export_name="ServicesSecGrp")
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        #vpc = ec2.Vpc.from_lookup(self, 'VPC', is_default=True)

        vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

        rdsInst = rds.DatabaseInstance(
            self,
            'SpringPetclinicDB',
            engine=rds.DatabaseInstanceEngine.MYSQL,
            instance_class=ec2.InstanceType('t2.medium'),
            master_username='******',
            database_name='petclinic',
            master_user_password=core.SecretValue('Welcome#123456'),
            vpc=vpc,
            deletion_protection=False,
            backup_retention=core.Duration.days(0),
            removal_policy=core.RemovalPolicy.DESTROY,
            #vpc_placement = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        )

        rdsInst.connections.allow_default_port_from_any_ipv4()

        cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc)

        asset = ecr_assets.DockerImageAsset(
            self,
            'spring-petclinic',
            directory='./docker/',
            build_args={
                'JAR_FILE': 'spring-petclinic-2.1.0.BUILD-SNAPSHOT.jar'
            })
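        # DockerImageAsset builds the image locally from ./docker (passing the Petclinic
        # jar as a build arg) and pushes it to the CDK-managed asset ECR repository.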

        cluster.add_capacity(
            "DefaultAutoScalingGroup",
            instance_type=ec2.InstanceType('t2.large'),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            min_capacity=2)

        ecs_service = ecs_patterns.ApplicationLoadBalancedEc2Service(
            self,
            "Ec2Service",
            cluster=cluster,
            memory_limit_mib=1024,
            service_name='spring-petclinic',
            desired_count=2,
            task_image_options={
                "image": ecs.ContainerImage.from_docker_image_asset(asset),
                "container_name": 'spring-petclinic',
                "container_port": 8080,
                "environment": {
                    'SPRING_DATASOURCE_PASSWORD':
                    '******',
                    'SPRING_DATASOURCE_USERNAME':
                    '******',
                    'SPRING_PROFILES_ACTIVE':
                    'mysql',
                    'SPRING_DATASOURCE_URL':
                    'jdbc:mysql://' + rdsInst.db_instance_endpoint_address +
                    '/petclinic?useUnicode=true'
                }
            })
Example #14
    def __init__(self, scope: core.Construct, id: str, ctx: object,
                 ecr_repository: ecr.Repository, kinesis_stream: ks.Stream,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.ecr_repository = ecr_repository

        self.vpc = ec2.Vpc.from_vpc_attributes(self, "VPC",
                                               **ctx.vpc_props.dict())

        # CloudWatch Logs Group
        self.log_group = cwl.LogGroup(scope=self, id="logs")

        self.kinesis_stream = kinesis_stream

        # Create a new ECS cluster for our services
        self.cluster = ecs.Cluster(self, vpc=self.vpc, id=f"{id}_cluster")
        cluster_name_output = core.CfnOutput(scope=self,
                                             id="cluster-name-out",
                                             value=self.cluster.cluster_name,
                                             export_name=f"{id}-cluster-name")

        # Create a role for ECS to interact with AWS APIs with standard permissions
        self.ecs_exec_role = iam.Role(
            scope=self,
            id="ecs_logstash-exec_role",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=([
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy")
            ]))
        # Grant ECS additional permissions to decrypt secrets from Secrets Manager that have been encrypted with our custom key
        if getattr(ctx, "secrets_key_arn", None) is not None:
            self.ecs_exec_role.add_to_policy(
                iam.PolicyStatement(actions=["kms:Decrypt"],
                                    effect=iam.Effect.ALLOW,
                                    resources=[ctx.secrets_key_arn]))
        # Grant ECS permissions to log to our log group
        self.log_group.grant_write(self.ecs_exec_role)

        # Load Balancer for Listening Services
        self.load_balancer = elb2.NetworkLoadBalancer(scope=self,
                                                      id=f"{id}-nlb",
                                                      vpc=self.vpc,
                                                      internet_facing=False,
                                                      cross_zone_enabled=True)

        # Create listener services
        service_names = []
        for service_name in getattr(ctx.inbound.services, "nlb", []):
            self.__create_nlb_service(service_name[0], ctx)
            service_names.append(service_name[0])

        for service_name in getattr(ctx.inbound.services, "cloudmap", []):
            self.__create_cloudmap_service(service_name[0], ctx)
            service_names.append(service_name[0])

        for service_name in getattr(ctx.inbound.services, "pull", []):
            self.__create_pull_service(service_name[0], ctx)
            service_names.append(service_name[0])

        service_names_output = core.CfnOutput(
            scope=self,
            id="service-names-out",
            value=",".join(service_names),
            export_name=f"{id}-service-names")
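    # Editor's sketch (hypothetical): the private helpers called above are not
    # part of this listing. A minimal __create_nlb_service, reusing the cluster,
    # ECR repository, log group and NLB created in __init__, might look roughly
    # like the following; the port (5044) and sizing values are assumptions, not
    # taken from the source.
    def __create_nlb_service(self, service_name: str, ctx: object) -> None:
        task_def = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task",
            cpu=512,
            memory_limit_mib=1024,
            execution_role=self.ecs_exec_role)
        task_def.add_container(
            id=service_name,
            image=ecs.ContainerImage.from_ecr_repository(self.ecr_repository),
            logging=ecs.LogDriver.aws_logs(log_group=self.log_group,
                                           stream_prefix=service_name),
            port_mappings=[ecs.PortMapping(container_port=5044)])
        service = ecs.FargateService(
            scope=self,
            id=f"{service_name}_service",
            cluster=self.cluster,
            task_definition=task_def)
        listener = self.load_balancer.add_listener(f"{service_name}-listener",
                                                   port=5044)
        listener.add_targets(f"{service_name}-target",
                             port=5044,
                             targets=[service])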
Example #15
    def __init__(self, scope, id, vpc, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # cluster creation
        cluster = aws_ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # service discovery creation
        sd_namespace = cluster.add_default_cloud_map_namespace(
            name="svc.test.local", vpc=vpc)
        aws_servicediscovery.Service(self,
                                     "svc.test.local",
                                     namespace=sd_namespace,
                                     load_balancer=True)

        # ECS role creation
        ecs_principle = aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        execution_role = aws_iam.Role(self,
                                      'execution-role',
                                      assumed_by=ecs_principle)
        execution_role.add_managed_policy(
            policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSCodeDeployRoleForECS"))
        execution_role.add_managed_policy(
            policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ContainerRegistryReadOnly"))
        task_role = aws_iam.Role(self, 'task-role', assumed_by=ecs_principle)
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSAppMeshEnvoyAccess"))
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="CloudWatchFullAccess"))
        task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AWSXRayDaemonWriteAccess"))

        # envoy ecr object
        envoy_ecr = aws_ecr.Repository.from_repository_attributes(
            self,
            'aws-envoy',
            repository_arn=core.Stack.of(self).format_arn(
                service="ecr",
                resource="aws-appmesh-envoy",
                account="840364872350"),
            repository_name="aws-appmesh-envoy")

        # colorteller image builds
        gateway_image = aws_ecs.ContainerImage.from_asset("./src/gateway")
        colorteller_image = aws_ecs.ContainerImage.from_asset(
            "./src/colorteller")

        # logging setup
        log_group = aws_logs.LogGroup(self,
                                      "/ecs/colorteller",
                                      retention=aws_logs.RetentionDays.ONE_DAY)
        gateway_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                      stream_prefix="gateway")
        black_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                    stream_prefix="black")
        blue_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                   stream_prefix="blue")
        red_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="red")
        white_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                    stream_prefix="white")
        tcpecho_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                      stream_prefix="tcpecho")

        # Mesh properties setup
        mesh_properties = aws_ecs.AppMeshProxyConfigurationProps(
            app_ports=[9080],
            proxy_egress_port=15001,
            proxy_ingress_port=15000,
            egress_ignored_i_ps=["169.254.170.2", "169.254.169.254"],
            ignored_uid=1337)

        # envoy ulimit defaults
        envoy_ulimit = aws_ecs.Ulimit(hard_limit=15000,
                                      name=aws_ecs.UlimitName.NOFILE,
                                      soft_limit=15000)

        # fargate task def - requires envoy proxy container, gateway app and x-ray
        gateway_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "gateway_task",
            cpu=256,
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        gateway_task_def.add_container("gateway",
                                       logging=gateway_ecs_logs,
                                       environment={
                                           "SERVER_PORT":
                                           "9080",
                                           "STAGE":
                                           "v1.1",
                                           "COLOR_TELLER_ENDPOINT":
                                           "colorteller.svc.test.local:9080",
                                           "TCP_ECHO_ENDPOINT":
                                           "tcpecho.svc.test.local:2701"
                                       },
                                       image=gateway_image).add_port_mappings(
                                           aws_ecs.PortMapping(
                                               container_port=9080,
                                               protocol=aws_ecs.Protocol.TCP))
        gateway_task_def.add_container(
            "xray",
            logging=gateway_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))
        gateway_envoy_container = gateway_task_def.add_container(
            "envoy",
            logging=gateway_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "debug",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/gateway",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        gateway_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        gateway_envoy_container.add_ulimits(envoy_ulimit)

        # black task def - requires color app, envoy and x-ray containers
        black_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "black-task",
            cpu=256,
            family="black",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        black_envoy_container = black_task_def.add_container(
            "envoy",
            logging=black_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/black",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        black_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        black_envoy_container.add_ulimits(envoy_ulimit)
        black_app_container = black_task_def.add_container(
            "black",
            logging=black_ecs_logs,
            environment={
                "COLOR": "black",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        black_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        black_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=black_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        black_task_def.add_container(
            "xray",
            logging=black_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # blue task def (same as black)
        blue_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "blue-task",
            cpu=256,
            family="blue",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        blue_envoy_container = blue_task_def.add_container(
            "envoy",
            logging=blue_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/blue",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        blue_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        blue_envoy_container.add_ulimits(envoy_ulimit)
        blue_app_container = blue_task_def.add_container(
            "blue",
            logging=blue_ecs_logs,
            environment={
                "COLOR": "black",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        blue_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        blue_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=blue_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        blue_task_def.add_container(
            "xray",
            logging=blue_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # red task def (same as black)
        red_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "red-task",
            cpu=256,
            family="red-task",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        red_envoy_container = red_task_def.add_container(
            "envoy",
            logging=red_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/red",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        red_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        red_envoy_container.add_ulimits(envoy_ulimit)
        red_app_container = red_task_def.add_container("red",
                                                       logging=red_ecs_logs,
                                                       environment={
                                                           "COLOR": "red",
                                                           "SERVER_PORT":
                                                           "9080",
                                                           "STAGE": "v1.2"
                                                       },
                                                       image=colorteller_image)
        red_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        red_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=red_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        red_task_def.add_container(
            "xray",
            logging=red_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # white task def (same as black) - colorteller.svc.test.local points to this service (because containers need something to resolve to or they fail)
        white_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "white-task",
            cpu=256,
            family="white",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        white_envoy_container = white_task_def.add_container(
            "envoy",
            logging=white_ecs_logs,
            environment={
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME":
                "mesh/ColorTellerAppMesh/virtualNode/white",
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        white_envoy_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        white_envoy_container.add_ulimits(envoy_ulimit)
        white_app_container = white_task_def.add_container(
            "white",
            logging=white_ecs_logs,
            environment={
                "COLOR": "white",
                "SERVER_PORT": "9080",
                "STAGE": "v1.1"
            },
            image=colorteller_image)
        white_app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        white_app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=white_envoy_container,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        white_task_def.add_container(
            "xray",
            logging=white_ecs_logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

        # tcpecho service (external docker image)
        tcpecho_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'tcpecho-tasks',
            cpu=256,
            family="tcpecho",
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role)
        tcpecho_task_def.add_container(
            "tcpecho",
            logging=tcpecho_ecs_logs,
            environment={
                "TCP_PORT": "2701",
                "NODE_NAME": "mesh/ColorTellerAppMesh/virtualNode/echo"
            },
            image=aws_ecs.ContainerImage.from_registry("cjimti/go-echo"),
            essential=True,
        ).add_port_mappings(
            aws_ecs.PortMapping(container_port=2701,
                                protocol=aws_ecs.Protocol.TCP))

        # adds task defs to fargate services - adds security group access to local vpc cidr block
        # all the services are treated the same way
        gateway_fargate_service = aws_ecs.FargateService(
            self,
            "gateway",
            cluster=cluster,
            task_definition=gateway_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="gateway"))
        gateway_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        black_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "black",
            cluster=cluster,
            task_definition=black_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="black"))
        black_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        blue_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "blue",
            cluster=cluster,
            task_definition=blue_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="blue"))
        blue_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        red_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "red",
            cluster=cluster,
            task_definition=red_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="red"))
        red_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        white_colorteller_fargate_service = aws_ecs.FargateService(
            self,
            "white",
            cluster=cluster,
            task_definition=white_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="colorteller"))
        white_colorteller_fargate_service.connections.security_groups[
            0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=aws_ec2.Port.tcp(9080),
                                description="Allow http inbound from VPC")
        echo_fargate_service = aws_ecs.FargateService(
            self,
            "tcpecho",
            cluster=cluster,
            task_definition=tcpecho_task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name="tcpecho"))
        echo_fargate_service.connections.security_groups[0].add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=aws_ec2.Port.tcp(2701),
            description="Allow http inbound from VPC")

        # adds autoscaling policies to all services
        for service in [
                black_colorteller_fargate_service,
                blue_colorteller_fargate_service,
                red_colorteller_fargate_service,
                white_colorteller_fargate_service, gateway_fargate_service,
                echo_fargate_service
        ]:
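            # Note: plain aws_ecs.FargateService objects (as used throughout this
            # example) expose auto_scale_task_count() directly, while the
            # ecs_patterns wrappers expose the underlying service via .service;
            # the try/except below covers either shape.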
            try:
                scaling = service.service.auto_scale_task_count(max_capacity=2)
            except AttributeError:
                scaling = service.auto_scale_task_count(max_capacity=2)
            scaling.scale_on_cpu_utilization(
                "CpuScaling",
                target_utilization_percent=50,
                scale_in_cooldown=core.Duration.seconds(60),
                scale_out_cooldown=core.Duration.seconds(60),
            )

        # configure loadbalancer to listen on port 80 and add targets to gateway and echo apps
        load_balancer = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
            self, "lb", vpc=vpc, internet_facing=True)
        listener = load_balancer.add_listener("PublicListener",
                                              port=80,
                                              open=True)

        health_check = aws_elasticloadbalancingv2.HealthCheck(
            interval=core.Duration.seconds(60),
            path="/ping",
            port="9080",
            timeout=core.Duration.seconds(5))

        # attach ALB to ECS service
        listener.add_targets(
            "gateway",
            port=80,
            targets=[gateway_fargate_service, echo_fargate_service],
            health_check=health_check,
        )

        # outputs of ALB and cluster
        core.CfnOutput(self,
                       "LoadBalancerDNS",
                       value=load_balancer.load_balancer_dns_name)
        core.CfnOutput(self, "ClusterName", value=cluster.cluster_name)
Example #16
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 1,
        maxcount: int = 50,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        env: Optional[Dict] = None,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []
        env = env or {}

        vpc = ec2.Vpc(self, f"{id}-vpc", max_azs=2)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        task_env = env.copy()
        task_env.update(dict(LOG_LEVEL="error"))

        # GUNICORN configuration
        if settings.workers_per_core:
            task_env.update({"WORKERS_PER_CORE": str(settings.workers_per_core)})
        if settings.max_workers:
            task_env.update({"MAX_WORKERS": str(settings.max_workers)})
        if settings.web_concurrency:
            task_env.update({"WEB_CONCURRENCY": str(settings.web_concurrency)})

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            f"{id}-service",
            cluster=cluster,
            cpu=cpu,
            memory_limit_mib=memory,
            desired_count=mincount,
            public_load_balancer=True,
            listener_port=80,
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry(
                    f"public.ecr.aws/developmentseed/titiler:{settings.image_version}",
                ),
                container_port=80,
                environment=task_env,
            ),
        )
        fargate_service.target_group.configure_health_check(path="/ping")

        for perm in permissions:
            fargate_service.task_definition.task_role.add_to_policy(perm)

        scalable_target = fargate_service.service.auto_scale_task_count(
            min_capacity=mincount, max_capacity=maxcount
        )

        # https://github.com/awslabs/aws-rails-provisioner/blob/263782a4250ca1820082bfb059b163a0f2130d02/lib/aws-rails-provisioner/scaling.rb#L343-L387
        scalable_target.scale_on_request_count(
            "RequestScaling",
            requests_per_target=50,
            scale_in_cooldown=core.Duration.seconds(240),
            scale_out_cooldown=core.Duration.seconds(30),
            target_group=fargate_service.target_group,
        )

        # scalable_target.scale_on_cpu_utilization(
        #     "CpuScaling", target_utilization_percent=70,
        # )

        fargate_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.ALL,
                string_representation="All port 80",
                from_port=80,
            ),
            description="Allows traffic on port 80 from ALB",
        )
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Create a VPC
        myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

        # SG for ELB creation
        websitefrontendSG = ec2.SecurityGroup(
            self,
            'websitefrontendSG',
            vpc=myvpc,
            security_group_name='websitefrontendSG')
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(80))
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(443))

        # Create ALB in VPC
        alb = elb.ApplicationLoadBalancer(
            self,
            'websitefrontend-public',
            vpc=myvpc,
            load_balancer_name='websitefrontend-public',
            security_group=websitefrontendSG,
            internet_facing=True)

        # Add target group to ALB
        catalogtargetgroup = elb.ApplicationTargetGroup(
            self,
            'CatalogTargetGroup',
            port=80,
            vpc=myvpc,
            target_type=elb.TargetType.IP)

        if not vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(
                self,
                'alblistenerhttp',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=80)

        if vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(
                self,
                'alblistenerhttp',
                load_balancer=alb,
                port=80,
                default_target_groups=[catalogtargetgroup])
            elb.ApplicationListenerRule(self,
                                        'httpredirectionrule',
                                        priority=1,
                                        host_header=vars.zone_name,
                                        listener=alblistenerhttp,
                                        redirect_response=elb.RedirectResponse(
                                            status_code='HTTP_301',
                                            port='443',
                                            protocol='HTTPS'))
            # OPTIONAL - Add https listener to ALB & attach certificate
            alblistenerhttps = elb.ApplicationListener(
                self,
                'alblistenerhttps',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=443,
                certificate_arns=[vars.sslcert_arn])

            # OPTIONAL - Redirect HTTP to HTTPS
            # alblistenerhttp.add_redirect_response(
            #     id = 'redirectionrule',
            #     port = '443',
            #     status_code = 'HTTP_301',
            #     protocol = 'HTTPS'
            # )

        if vars.customdomain:
            # OPTIONAL - Update DNS with ALB
            webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
                self,
                id='customdomain',
                hosted_zone_id=vars.hosted_zone_id,
                zone_name=vars.zone_name)
            webshop_root_record = r53.ARecord(
                self,
                'ALBAliasRecord',
                zone=webshopxyz_zone,
                target=r53.RecordTarget.from_alias(
                    alias.LoadBalancerTarget(alb)))

        # SG for ECS creation
        ECSSG = ec2.SecurityGroup(self,
                                  'ECSSecurityGroup',
                                  vpc=myvpc,
                                  security_group_name='ECS')
        ECSSG.add_ingress_rule(peer=websitefrontendSG,
                               connection=ec2.Port.tcp(80))

        # SG for MySQL creation
        MySQLSG = ec2.SecurityGroup(self,
                                    'DBSecurityGroup',
                                    vpc=myvpc,
                                    security_group_name='DB')
        MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

        # Create DB subnet group
        subnetlist = []
        for subnet in myvpc.private_subnets:
            subnetlist.append(subnet.subnet_id)
        subnetgr = rds.CfnDBSubnetGroup(
            self,
            'democlustersubnetgroup',
            db_subnet_group_name='democlustersubnetgroup',
            db_subnet_group_description='DemoCluster',
            subnet_ids=subnetlist)

        # Create secret db passwd
        secret = sm.SecretStringGenerator(
            exclude_characters="\"'@/\\",
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=40)
        dbpass = sm.Secret(self,
                           'democlusterpass',
                           secret_name='democlusterpass',
                           generate_secret_string=secret)

        # Create Aurora serverless MySQL instance
        dbcluster = rds.CfnDBCluster(
            self,
            'DemoCluster',
            engine='aurora',
            engine_mode='serverless',
            engine_version='5.6',
            db_cluster_identifier='DemoCluster',
            master_username=dbpass.secret_value_from_json(
                'username').to_string(),
            master_user_password=dbpass.secret_value_from_json(
                'password').to_string(),
            storage_encrypted=True,
            port=3306,
            vpc_security_group_ids=[MySQLSG.security_group_id],
            scaling_configuration=rds.CfnDBCluster.
            ScalingConfigurationProperty(auto_pause=True,
                                         max_capacity=4,
                                         min_capacity=1,
                                         seconds_until_auto_pause=300),
            db_subnet_group_name=subnetgr.db_subnet_group_name)
        dbcluster.add_override('DependsOn', 'democlustersubnetgroup')

        # Attach database to secret
        attach = sm.CfnSecretTargetAttachment(
            self,
            'RDSAttachment',
            secret_id=dbpass.secret_arn,
            target_id=dbcluster.ref,
            target_type='AWS::RDS::DBCluster')

        # Upload image into ECR repo
        ecrdemoimage = ecra.DockerImageAsset(self,
                                             'ecrdemoimage',
                                             directory='../',
                                             repository_name='demorepo',
                                             exclude=['cdk.out'])

        # Create ECS fargate cluster
        ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

        # Create task role for productsCatalogTask
        getsecretpolicystatement = iam.PolicyStatement(actions=[
            "secretsmanager:GetResourcePolicy",
            "secretsmanager:GetSecretValue", "secretsmanager:DescribeSecret",
            "secretsmanager:ListSecretVersionIds"
        ],
                                                       resources=[
                                                           dbpass.secret_arn
                                                       ],
                                                       effect=iam.Effect.ALLOW)
        getsecretpolicydocument = iam.PolicyDocument(
            statements=[getsecretpolicystatement])
        taskrole = iam.Role(
            self,
            'TaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name='TaskRoleforproductsCatalogTask',
            inline_policies=[getsecretpolicydocument])

        # Create task definition
        taskdefinition = ecs.FargateTaskDefinition(self,
                                                   'productsCatalogTask',
                                                   cpu=1024,
                                                   memory_limit_mib=2048,
                                                   task_role=taskrole)

        # Add container to task definition
        productscatalogcontainer = taskdefinition.add_container(
            'productscatalogcontainer',
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=ecrdemoimage),
            environment={
                "region": vars.region,
                "secretname": "democlusterpass"
            })
        productscatalogcontainer.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80))
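        # Editor's note (application-side sketch, not part of the stack): the
        # container only receives the region and the secret name, so the app is
        # expected to fetch the database credentials itself at runtime, e.g.:
        #
        #   import boto3, json, os
        #   sm_client = boto3.client("secretsmanager", region_name=os.environ["region"])
        #   creds = json.loads(sm_client.get_secret_value(
        #       SecretId=os.environ["secretname"])["SecretString"])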

        # Create service and associate it with the cluster
        catalogservice = ecs.FargateService(
            self,
            'catalogservice',
            task_definition=taskdefinition,
            assign_public_ip=False,
            security_group=ECSSG,
            vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets),
            cluster=ecscluster,
            desired_count=2)

        # Add autoscaling to the service
        scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                       min_capacity=1)
        scaling.scale_on_cpu_utilization(
            'ScaleOnCPU',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(amount=1),
            scale_out_cooldown=core.Duration.seconds(amount=0))

        # Associate the fargate service with load balancer targetgroup
        catalogservice.attach_to_application_target_group(catalogtargetgroup)
Example #18
    def __init__(self, scope: core.Construct, id: str, deploy_env: str,
                 vpc: aws_ec2.Vpc, db_redis_stack: RdsElasticacheEfsStack,
                 config: dict, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.config = config
        self.deploy_env = deploy_env
        self.db_port = DB_PORT
        # cannot map volumes to Fargate task defs yet - so this is done via Boto3 since CDK does not
        # support it yet: https://github.com/aws/containers-roadmap/issues/825
        #self.efs_file_system_id = db_redis_stack.efs_file_system_id
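        # Editor's note (hedged sketch of the Boto3 step mentioned above; the
        # task family, volume name and file system id are placeholders): the EFS
        # volume can be attached by re-registering the task definition, e.g.
        #
        #   ecs_client = boto3.client("ecs")
        #   td = ecs_client.describe_task_definition(
        #       taskDefinition="<task-family>")["taskDefinition"]
        #   ecs_client.register_task_definition(
        #       family=td["family"],
        #       containerDefinitions=td["containerDefinitions"],
        #       volumes=[{"name": "efs-dags",
        #                 "efsVolumeConfiguration": {"fileSystemId": "<efs-id>"}}],
        #       ...)  # plus cpu/memory/networkMode/role fields copied from `td`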
        cluster_name = get_cluster_name(deploy_env)
        self.cluster = ecs.Cluster(self,
                                   cluster_name,
                                   cluster_name=cluster_name,
                                   vpc=vpc)
        pwd_secret = ecs.Secret.from_ssm_parameter(
            StringParameter.from_secure_string_parameter_attributes(
                self,
                f"dbpwd-{deploy_env}",
                version=1,
                parameter_name="postgres_pwd"))
        self.secrets = {"POSTGRES_PASSWORD": pwd_secret}
        environment = {
            "EXECUTOR":
            "Celery",
            "POSTGRES_HOST":
            db_redis_stack.db_host,
            "POSTGRES_PORT":
            str(self.db_port),
            "POSTGRES_DB":
            "airflow",
            "POSTGRES_USER":
            self.config["dbadmin"],
            "REDIS_HOST":
            db_redis_stack.redis_host,
            "VISIBILITY_TIMEOUT":
            str(self.config["celery_broker_visibility_timeout"])
        }
        image_asset = DockerImageAsset(self,
                                       "AirflowImage",
                                       directory="build",
                                       repository_name=config["ecr_repo_name"])
        self.image = ecs.ContainerImage.from_docker_image_asset(image_asset)
        # web server - this initializes the db so must happen first
        self.web_service = self.airflow_web_service(environment)
        # https://github.com/aws/aws-cdk/issues/1654
        self.web_service_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        redis_port_info = Port(protocol=Protocol.TCP,
                               string_representation="allow to redis",
                               from_port=REDIS_PORT,
                               to_port=REDIS_PORT)
        worker_port_info = Port(protocol=Protocol.TCP,
                                string_representation="allow to worker",
                                from_port=AIRFLOW_WORKER_PORT,
                                to_port=AIRFLOW_WORKER_PORT)
        redis_sg = SecurityGroup.from_security_group_id(
            self,
            id=f"Redis-SG-{deploy_env}",
            security_group_id=db_redis_stack.redis.vpc_security_group_ids[0])
        bastion_sg = db_redis_stack.bastion.connections.security_groups[0]
        self.web_service_sg().connections.allow_to(redis_sg, redis_port_info,
                                                   'allow Redis')
        self.web_service_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)
        # scheduler
        self.scheduler_service = self.create_scheduler_ecs_service(environment)
        # worker
        self.worker_service = self.worker_service(environment)
        self.scheduler_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        self.scheduler_sg().connections.allow_to(redis_sg, redis_port_info,
                                                 'allow Redis')
        self.scheduler_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)

        self.worker_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        self.worker_sg().connections.allow_to(redis_sg, redis_port_info,
                                              'allow Redis')
        self.worker_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)
        # When you start an Airflow worker, Airflow starts a tiny web server
        # subprocess to serve the worker's local log files to the main Airflow
        # web server, which then builds pages and sends them to users. This
        # defines the port on which the logs are served. It needs to be unused
        # and reachable from the main web server so it can connect to the workers.
        self.web_service_sg().connections.allow_to(self.worker_sg(),
                                                   worker_port_info,
                                                   'web service to worker')
Example #19
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        #############################################
        # Import resource and custom setting part start
        #############################################
        #cn-north-1
        impRes={
                 "vpc":"vpc-0883083ff3a10c1ec",
                 "SvcSG":"sg-04d3b60e954c1c1ef",
                 "ALBSG":"sg-0b6d093d52d48bba9",
                 "ALBInternet":True,
                 "taskRole":"arn:aws-cn:iam::627484392488:role/ecsTaskExecutionRole",
                 "AlbSubnet":[
                       {"subnetId":"subnet-0d16fa0c969f234d3",
                        "routeTabId":"rtb-074c6b532f3030ad6"},
                       {"subnetId":"subnet-0f28a97c04d3b11cd",
                        "routeTabId":"rtb-074c6b532f3030ad6"}
                 ],
                 #"SvcSubNet":[{"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-074c6b532f3030ad6"}]
                 "SvcSubNet":[{"subnetId":"subnet-0f28a97c04d3b11cd","routeTabId":"rtb-0587cc522717461cd"},
                              {"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-0587cc522717461cd"}]
               }
        newRes={
                 "TG":{"HealthPath":"/test.html","Port":80,"containPort":80},
                 "Listener":{"Port":80},
                 "TaskFamily":"tsFargate",
                 "ImageAsset1":{"DockfilePath":"httpd-ssh",
                                "BuildArgs":{"HTTP_PROXY":"http://YOUR_PROXY_SERVER:80"}
                               }
               }

        MyTaskDefinition=[{"Cpu":512,"MemLimitMib":1024}]
        MyContainerDefinition=[
             {"containerName":"MyContainer1",
              "cpu":256,
              "essential":True,
              "portMappings":[ecs.PortMapping(container_port=80,host_port=80)], #"portMappings":[ecs.PortMapping(container_port=80,host_port=80),ecs.PortMapping(container_port=22,host_port=22)],
              "environment":{"SSH_PUBLIC_KEY":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/alWrS+HH5KkPbso+Tsy+Z0WGTX5wvXvon5OacLMyOU3gj2mbbIifasXf/RadpuywuyW3uFirtRlPmSb5Q0PVLODku503Xettw+u6/Z22VV7F2ACgg4iHaCo2SR4L8saUrLLfcKXKr/WCn3w7uYcqGsXEcSFCCSZgn4BoZJqP4Q=="},
              "LogMountPoint":["/usr/local/apache2/logs"]
             }
        ]
        MySvc={"AssignPubIp":True, "desiredCount":1}
        #############################################
        # Import resource and custom setting part end
        #############################################
		
        #if you import an external resource you cannot set a destroy policy on it
        #import VPC, Private Subnet, SG
        vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=impRes["vpc"])	
        
        #import SG
        mysvcsg = ec2.SecurityGroup.from_security_group_id(self, "svcsg", 
                impRes["SvcSG"],
                mutable=False)
        
        #import Role
        taskRole = iam.Role.from_role_arn(self, "TaskRole",impRes["taskRole"])
        
        #create ALB        
        mytargetGrp = elbv2.ApplicationTargetGroup(self, "targetGrp",
                target_type=elbv2.TargetType.IP,
                port=newRes["TG"]["Port"],
                vpc=vpc,
                health_check=elbv2.HealthCheck(path=newRes["TG"]["HealthPath"]))
        #target group cannot use .apply_removal_policy directly
        cfn_mytargetGrp=mytargetGrp.node.find_child("Resource")
        cfn_mytargetGrp.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
		
        #import public subnet for alb
        albsubnets = [
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid1',
                    subnet_id = impRes["AlbSubnet"][0]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][0]["routeTabId"]
                ),
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid2',
                    subnet_id = impRes["AlbSubnet"][1]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][1]["routeTabId"]
                )
        ]		
        vpc_subnets_selection = ec2.SubnetSelection(subnets=albsubnets)
        #create new ALB
        myalb = elbv2.ApplicationLoadBalancer(self, "ALBv2",
                vpc=vpc,
                security_group=ec2.SecurityGroup.from_security_group_id(self, "ALBSG", impRes["ALBSG"],mutable=False),
                internet_facing=impRes["ALBInternet"],
                vpc_subnets=vpc_subnets_selection)
        myalb.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        #create new ALB listener
        myalblistener = elbv2.ApplicationListener(self, "ALBlistenter", 
                load_balancer=myalb, 
                port=newRes["Listener"]["Port"])
        myalblistener.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        myalblistener.add_target_groups("albaddtg", target_groups=[mytargetGrp])
        
        
        #create new ECS Cluster
        mycluster = ecs.Cluster(self, "cluster", vpc=vpc)
        mycluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        
        fargatetaskDefinition = ecs.FargateTaskDefinition(self, "fargatetaskDefinition",
                cpu=MyTaskDefinition[0]["Cpu"],
                memory_limit_mib=MyTaskDefinition[0]["MemLimitMib"],
                execution_role=taskRole,
                family=newRes["TaskFamily"],
                task_role=taskRole)
                #volumes=myEfsVols)      
        fargatetaskDefinition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)


        #define docker image asset
        dirname = os.path.dirname(__file__)
        #for container 1, normally httpd
        #create the image asset; the image is built locally and then pushed to ECR
        asset1 = DockerImageAsset(self, "ImageAsset1",
                   directory=os.path.join(dirname, "../..", newRes["ImageAsset1"]["DockfilePath"]),
                   build_args=newRes["ImageAsset1"]["BuildArgs"]
                )
         
        #create container definition for task definition
        MyContainer1def = ecs.ContainerDefinition(self, "MyContainer1def",
                task_definition=fargatetaskDefinition,
                linux_parameters=ecs.LinuxParameters(self,"LinuxPara1",init_process_enabled=True),
                image=ecs.ContainerImage.from_ecr_repository(asset1.repository, asset1.image_uri.rpartition(":")[-1]),
                container_name=MyContainerDefinition[0]["containerName"],
                essential=MyContainerDefinition[0]["essential"],
                port_mappings=MyContainerDefinition[0]["portMappings"],
                environment=MyContainerDefinition[0]["environment"]
        )
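        # Editor's note (hedged sketch): "LogMountPoint" above is declared but
        # not used anywhere in this listing. If it were wired up (for example to
        # an EFS volume, cf. the commented-out `volumes=myEfsVols`), it might
        # look roughly like this:
        #
        #   fargatetaskDefinition.add_volume(
        #       name="applogs",
        #       efs_volume_configuration=ecs.EfsVolumeConfiguration(
        #           file_system_id="<efs-file-system-id>"))
        #   MyContainer1def.add_mount_points(ecs.MountPoint(
        #       container_path=MyContainerDefinition[0]["LogMountPoint"][0],
        #       read_only=False,
        #       source_volume="applogs"))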
	
        #import the service's private subnets
        mysvcprivateSNs = [
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN1',
                    subnet_id = impRes["SvcSubNet"][0]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][0]["routeTabId"]),
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN2',
                    subnet_id = impRes["SvcSubNet"][1]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][1]["routeTabId"])
        ]

        #create service
        myservice=ecs.FargateService(self,"service",
                task_definition=fargatetaskDefinition,
                assign_public_ip=MySvc["AssignPubIp"],
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,
                vpc_subnets=ec2.SubnetSelection(subnets=mysvcprivateSNs),
                security_group=mysvcsg,
                cluster=mycluster,
                desired_count=MySvc["desiredCount"])
        
        mytargetGrp.add_target(myservice.load_balancer_target(container_name="MyContainer1",container_port=newRes["TG"]["containPort"], protocol=ecs.Protocol.TCP))
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, "EC2Cluster",
            vpc=vpc
        )

        security_group = ec2.SecurityGroup(
            self, "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
        )

        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_tcp(),
            description="Allow all traffic"
        )

        app_target_group = elbv2.ApplicationTargetGroup(
            self, "AppTargetGroup",
            port=http_port,
            vpc=vpc,
            target_type=elbv2.TargetType.IP,
        )

        elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group,
        )

        app_listener = elbv2.ApplicationListener(
            self, "AppListener",
            load_balancer=elastic_loadbalancer,
            port=http_port,
            default_target_groups=[app_target_group],
        )

        task_definition = ecs.TaskDefinition(
            self, "TaskDefenition",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=task_def_cpu,
            memory_mib=task_def_memory_mb,
        )

        container_definition = ecs.ContainerDefinition(
            self, "ContainerDefinition",
            image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
            task_definition=task_definition,
            logging=ecs.AwsLogDriver(
                stream_prefix="DemoContainerLogs",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )

        container_definition.add_port_mappings(
            ecs.PortMapping(
                container_port=http_port,
            )
        )

        fargate_service = ecs.FargateService(
            self, "FargateService",
            task_definition=task_definition,
            cluster=cluster,
            security_group=security_group,
        )

        fargate_service.attach_to_application_target_group(
            target_group=app_target_group,
        )

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=elastic_loadbalancer.load_balancer_dns_name
        )
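        # Editor's note: http_port, task_def_cpu and task_def_memory_mb are used
        # above but not defined in this listing; they are presumably module-level
        # constants, e.g. (the values here are assumptions, not from the source):
        #
        #   http_port = 80
        #   task_def_cpu = "512"
        #   task_def_memory_mb = "1024"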
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        parse_image_list_file = aws_lambda.Function(
            self,
            'parse_image_list_file',
            handler='parse_image_list_file.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('parse_image_list_file'),
            memory_size=10240,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        list_objects = aws_lambda.Function(
            self,
            'list_objects',
            handler='list_objects.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('list_objects'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        get_size_and_store = aws_lambda.Function(
            self,
            'get_size_and_store',
            handler='get_size_and_store.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('get_size_and_store'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        images_bucket = aws_s3.Bucket(self, "images_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
            resources=["*"])

        parse_image_list_file.add_to_role_policy(
            lambda_supplemental_policy_statement)
        list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
        get_size_and_store.add_to_role_policy(
            lambda_supplemental_policy_statement)

        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        # notification_topic = aws_sns.Topic(self, "notification_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
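        # Invoke parse_image_list_file whenever a new object is created in images_bucket.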
        images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(parse_image_list_file))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
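        # Each work queue is paired with a companion queue used as its dead-letter queue
        # (messages move there after 10 failed receives). The 301 s visibility timeout
        # stays just above the 300 s timeout of the Lambda functions consuming them.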
        comprehend_queue_iqueue = aws_sqs.Queue(self,
                                                "comprehend_queue_iqueue")
        comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=comprehend_queue_iqueue)
        comprehend_queue = aws_sqs.Queue(
            self,
            "comprehend_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=comprehend_queue_iqueue_dlq)

        rekognition_queue_iqueue = aws_sqs.Queue(self,
                                                 "rekognition_queue_iqueue")
        rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=rekognition_queue_iqueue)
        rekognition_queue = aws_sqs.Queue(
            self,
            "rekognition_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=rekognition_queue_dlq)

        object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
        object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                                   queue=object_queue_iqueue)
        object_queue = aws_sqs.Queue(
            self,
            "object_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=object_queue_dlq)

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
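        # get_size_and_store consumes object_queue in batches of up to 10 messages.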
        get_size_and_store.add_event_source(
            SqsEventSource(object_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################
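        # Elasticsearch 7.1 domain: 3 dedicated master nodes and 4 data nodes across
        # 2 AZs, 100 GiB EBS volumes, with slow-search, application and slow-index logs enabled.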
        s3workflow_domain = aws_elasticsearch.Domain(
            self,
            "s3workflow_domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3workflow_pool = aws_cognito.UserPool(
            self,
            "s3workflow-pool",
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      username=True))

        ###########################################################################
        # AMAZON VPC
        ###########################################################################
        vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                          max_azs=3)  # default is all AZs in region

        ###########################################################################
        # AMAZON ECS CLUSTER
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

        ###########################################################################
        # AMAZON ECS Repositories
        ###########################################################################
        rekognition_repository = aws_ecr.Repository(
            self,
            "rekognition_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy.DESTROY)
        comprehend_repository = aws_ecr.Repository(
            self,
            "comprehend_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
            ],
            resources=["*"])
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(
            task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(
            self,
            "task_execution_policy",
            document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(
            self,
            "task_execution_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
                "comprehend:*", "es:*"
            ],
            resources=["*"])
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self,
                                     "task_policy",
                                     document=task_policy_document)
        task_role = aws_iam.Role(
            self,
            "task_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_role.attach_inline_policy(task_policy)

        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        rekognition_task_definition = aws_ecs.TaskDefinition(
            self,
            "rekognition_task_definition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        comprehend_task_definition = aws_ecs.TaskDefinition(
            self,
            "comprehend_task_definition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        ###########################################################################
        # AMAZON ECS Images
        ###########################################################################
        rekognition_ecr_image = aws_ecs.EcrImage(
            repository=rekognition_repository, tag="latest")
        comprehend_ecr_image = aws_ecs.EcrImage(
            repository=comprehend_repository, tag="latest")

        ###########################################################################
        # ENVIRONMENT VARIABLES
        ###########################################################################
        environment_variables = {}
        environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
        environment_variables[
            "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
        environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
        environment_variables[
            "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

        parse_image_list_file.add_environment(
            "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
        parse_image_list_file.add_environment("QUEUEURL",
                                              rekognition_queue.queue_url)
        parse_image_list_file.add_environment("DEBUG", "False")
        parse_image_list_file.add_environment("BUCKET", "-")
        parse_image_list_file.add_environment("KEY", "-")

        list_objects.add_environment("QUEUEURL", object_queue.queue_url)
        list_objects.add_environment("ELASTICSEARCH_HOST",
                                     s3workflow_domain.domain_endpoint)
        list_objects.add_environment("S3_BUCKET_NAME",
                                     images_bucket.bucket_name)
        list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
        list_objects.add_environment("S3_BUCKET_SUFFIX", "")
        list_objects.add_environment("LOGGING_LEVEL", "INFO")

        get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
        get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                           s3workflow_domain.domain_endpoint)
        get_size_and_store.add_environment("S3_BUCKET_NAME",
                                           images_bucket.bucket_name)
        get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
        get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
        get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

        ###########################################################################
        # ECS Log Drivers
        ###########################################################################
        rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays.ONE_DAY)
        comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # ECS Task Definitions
        ###########################################################################
        rekognition_task_definition.add_container(
            "rekognition_task_definition",
            image=rekognition_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=rekognition_task_log_driver)

        comprehend_task_definition.add_container(
            "comprehend_task_definition",
            image=comprehend_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=comprehend_task_log_driver)

        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(
            self,
            "hosted_zone",
            zone_name="s3workflow.com",
            comment="private hosted zone for s3workflow system")
        hosted_zone.add_vpc(vpc)
Example #22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # ==============================
        # ======= CFN PARAMETERS =======
        # ==============================
        project_name_param = core.CfnParameter(scope=self, id='ProjectName', type='String')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = f'{project_name_param.value_as_string}-artifacts-{core.Aws.ACCOUNT_ID}'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # ==================================================
        # ================= IAM ROLE =======================
        # ==================================================
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # ==================================================
        # ================== SECRET ========================
        # ==================================================
        db_password_secret = sm.Secret(
            scope=self,
            id='DBSECRET',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )
        

        # ==================================================
        # ==================== VPC =========================
        # ==================================================
        #public_subnet = ec2.SubnetConfiguration(name='Public', subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=28)
        #dev-shared-public-subnet-az1
        #private_subnet = ec2.SubnetConfiguration(name='Private', subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=28)
        #dev-shared-private-subnet-az1
        #isolated_subnet = ec2.SubnetConfiguration(name='DB', subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=28) 
        #dev-shared-private-subnet-az1

        # use the existing VPC (needed later for Fargate)
        """ vpc = ec2.Vpc(
            scope=self,
            id='VPC',
            cidr='10.0.0.0/24',
            max_azs=2,
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=1,
            subnet_configuration=[public_subnet, private_subnet, isolated_subnet]
        ) """

        """ stack = MyStack(
            app, "MyStack", env=Environment(account="account_id", region="region")
        ) """

        vpc = ec2.Vpc.from_lookup(self, "VPC",
            vpc_id = "vpc-03076add1b1efca31" #is_default=True
        ) #TODO: fill in correct arguments
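        # NOTE: Vpc.from_lookup requires the stack to be synthesized with an explicit
        # account/region env (see the commented-out MyStack example above).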
        #vpc_id = "vpc-03076add1b1efca31"

        # leave as is, should be fine; if not, check (this is not the NAT gateway)

        #original: vpc.add_gateway_endpoint('S3Endpoint', service=ec2.GatewayVpcEndpointAwsService.S3)
        # ==================================================
        # ================= S3 BUCKET ======================
        # ==================================================
        artifact_bucket = s3.Bucket(
            scope=self,
            id='ARTIFACTBUCKET',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # # ==================================================
        # # ================== DATABASE  =====================
        # # ==================================================
        # Creates a security group for AWS RDS
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', vpc=vpc, security_group_name='sg_rds')
        # Adds an ingress rule which allows resources in the VPC's CIDR to access the database.
        #original: sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.0.0.0/24'), connection=ec2.Port.tcp(port))
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('10.206.192.0/19'), connection=ec2.Port.tcp(port))
        #10.206.192.0/19

        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            security_groups=[sg_rds],
            #vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE), #TODO: check if you need to select private here and how
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )
        # ==================================================
        # =============== FARGATE SERVICE ==================
        # ==================================================
        
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name, vpc=vpc)

        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )

        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='container',
                repository_name=container_repo_name
            ),
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Setup security group
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound from VPC for mlflow'
        )

        # Setup autoscaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
        # ==================================================
        # =================== OUTPUTS ======================
        # ==================================================
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
Example #23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        branch = kwargs['env']['branch']
        # create s3 bucket for CodeCommit artifacts
        s3_bucket = S3_Bucket(self, 's3-' + branch)

        # Create IAM roles for git merge and CodeBuild
        roles = Roles(self, 'roles-' + branch)

        # Define New Codepipeline
        pipeline = ServicePipeline(self,
                                   'pipeline-' + branch,
                                   bucket=s3_bucket.ArtifactBucket)

        # Create GitHub Account
        github = GitHub(self,
                        'GitHubSource-' + branch,
                        pipeline=pipeline.pipeline)

        # Create ECR Repo
        ecr_repo = ECRRepo(
            self,
            'ECRRepo-' + branch,
        )

        # Create CodeBuild
        GitCodeBuild(
            self,
            'CodeBuild-' + branch,
            source=github.sourceOutput,
            pipeline=pipeline.pipeline,
            bucket=s3_bucket.ArtifactBucket,
            role=roles.CodeBuildServiceRole,
            frontend=ecr_repo.flask.repository_uri,
        )

        # Create VPC for the ecs
        vpc = ec2.Vpc(
            self,
            "MyVPC-" + branch,
            max_azs=2,
        )

        # Create ECS cluster
        cluster = ecs.Cluster(
            self,
            'EC2-Cluster-' + branch,
            vpc=vpc,
        )

        # Add Auto Scaling Group
        for i in range(3):
            cluster.add_capacity(
                "DefaultAutoScalingGroup-" + str(i) + '-' + branch,
                instance_type=ec2.InstanceType("t2.medium"),
                allow_all_outbound=True,
                key_name=os.environ['KEYNAME'],
                # vpc_subnets=vpc.public_subnets
            )

        if branch == 'master':
            # Add HostedZone
            hosted_zone = route53.HostedZone(
                self,
                'hosted-zone-' + branch,
                # hosted_zone_id='Z3HNUDRBTJMWFV',
                zone_name='wiki-trend.com')
            domain_name = 'wiki-trend.com'
        else:
            hosted_zone = None
            domain_name = None

        # Add Load Balancer
        ecs_service = ecs_patterns.LoadBalancedEc2Service(
            self,
            'Ec2Service-' + branch,
            cluster=cluster,
            # service_name='Frontend-1-'+branch,
            memory_limit_mib=2048,
            container_port=80,
            environment={
                "PORT": '80',
                'NEO4J_USER': os.environ['NEO4J_USER'],
                'NEO4J_PASSWORD': os.environ['NEO4J_PASSWORD']
            },
            domain_name=domain_name,
            domain_zone=hosted_zone,
            # image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            image=ecs.ContainerImage.from_ecr_repository(ecr_repo.flask),
            public_load_balancer=True,
        )

        core.CfnOutput(
            self,
            'ECRRepoURI-' + branch,
            description="The URI of the ECR Repo for flask frontend",
            value=ecr_repo.flask.repository_uri)

        core.CfnOutput(
            self,
            "CodePipelineURL-" + branch,
            description="The URL of the created Pipeline",
            value="https://{}.console.aws.amazon.com/codepipeline/home?region={}#/view/{}"
            .format(os.environ['AWS_REGION'], os.environ['AWS_REGION'],
                    pipeline.pipeline.pipeline_name))

        core.CfnOutput(self,
                       "LoadBalancerDNS-" + branch,
                       value=ecs_service.load_balancer.load_balancer_dns_name)
Example #24
    def create_fargate_NLB_autoscaling_custom(self, vpc, **kwargs):
        ####################
        # Unpack Value for name/ecr_repo
        app_name = kwargs['function'].replace("_", "-")
        task_name = "{}-task-definition".format(app_name)
        log_name = app_name
        image_name = "{}-image".format(app_name)
        container_name = "{}-container".format(app_name)
        service_name = "{}-service".format(app_name)

        app_ecr = kwargs['ecr']
        ecs_role = kwargs['ecs_role']

        ####################
        # Create Cluster
        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        ####################
        # Config IAM Role
        # add managed policy statement
        # ecs_base_role = iam.Role(
        #     self,
        #     "ecs_service_role",
        #     assumed_by=iam.ServicePrincipal("ecs.amazonaws.com")
        # )
        # ecs_role = ecs_base_role.from_role_arn(self, 'gw-ecr-role-test', role_arn='arn:aws:iam::002224604296:role/ecsTaskExecutionRole')

        ####################
        # Create Fargate Task Definition
        fargate_task = ecs.FargateTaskDefinition(self,
                                                 task_name,
                                                 execution_role=ecs_role,
                                                 task_role=ecs_role,
                                                 cpu=2048,
                                                 memory_limit_mib=8192)
        # 0. config log
        ecs_log = ecs.LogDrivers.aws_logs(stream_prefix=log_name)
        # 1. prepare ecr repository
        ecr_repo = ecr.Repository.from_repository_name(self,
                                                       id=image_name,
                                                       repository_name=app_ecr)
        fargate_container = fargate_task.add_container(
            container_name,
            image=ecs.ContainerImage.from_ecr_repository(ecr_repo),
            logging=ecs_log,
            environment={
                'KG_PATH': "s3://autorec-1",
                "REDIS_URL": self.redis_host,
                "REDIS_PORT": self.redis_port
            })
        # 2. config port mapping
        port_mapping = ecs.PortMapping(container_port=9008,
                                       host_port=9008,
                                       protocol=ecs.Protocol.TCP)
        fargate_container.add_port_mappings(port_mapping)

        ####################
        # Config NLB service
        # fargate_service = ecs.FargateService(self, 'graph-inference-service',
        #     cluster=cluster, task_definition=fargate_task, assign_public_ip=True
        # )
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            service_name,
            cluster=cluster,
            task_definition=fargate_task,
            assign_public_ip=True,
            desired_count=20,
            listener_port=9008)
        # 0. allow inbound in sg
        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(
                # peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
                peer=ec2.Peer.ipv4('0.0.0.0/0'),
                connection=ec2.Port.tcp(9008),
                description="Allow http inbound from VPC")

        # 1. setup autoscaling policy
        # autoscaling: scale the task count automatically
        # scaling = fargate_service.service.auto_scale_task_count(
        #     max_capacity=50
        # )
        # scaling.scale_on_cpu_utilization(
        #     "CpuScaling",
        #     target_utilization_percent=50,
        #     scale_in_cooldown=core.Duration.seconds(60),
        #     scale_out_cooldown=core.Duration.seconds(60),
        # )

        return fargate_service.load_balancer.load_balancer_dns_name
class MLflowStack(core.Stack):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        ##
        ## General parameters used for provisioning the infrastructure
        ##
        project_name_param = core.CfnParameter(scope=self, id='mlflowStack', type='String', default='mlflowStack')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = 'mlflowbucket-track-stack'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # Attach the managed policies to the role that will be assigned to the ECS task.
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # Secrets Manager secret that stores the password of our RDS MySQL instance
        db_password_secret = sm.Secret(
            scope=self,
            id='dbsecret',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )

        # Create the S3 bucket
        artifact_bucket = s3.Bucket(
            scope=self,
            id='mlflowstacktrack',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Look up the existing VPC to attach the RDS instance to
        dev_vpc = ec2.Vpc.from_vpc_attributes(
            self, '<VPC_NAME>',
            vpc_id="<VPC_ID>",
            availability_zones=core.Fn.get_azs(),
            private_subnet_ids=["PRIVATE_SUBNET_ID_1", "PRIVATE_SUBNET_ID_2", "PRIVATE_SUBNET_ID_3"]
        )

        # For testing purposes we allow 0.0.0.0/0 here
        sg_rds = ec2.SecurityGroup(scope=self, id='SGRDS', security_group_name='sg_rds', vpc=dev_vpc)
        
        sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'), connection=ec2.Port.tcp(port))

        # Create the RDS instance
        database = rds.DatabaseInstance(
            scope=self,
            id='MYSQL',
            database_name=db_name,
            port=port,
            credentials=rds.Credentials.from_username(username=username, password=db_password_secret.secret_value),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),            
            security_groups=[sg_rds],
            vpc=dev_vpc,            
            # multi_az=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False
        )

        # Create the ECS cluster in the existing VPC
        cluster = ecs.Cluster(scope=self, id='CLUSTER', cluster_name=cluster_name, vpc=dev_vpc)
        # Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id='MLflow',
            task_role=role,

        )
        # Create our container from the MLflow Dockerfile
        container = task_definition.add_container(
            id='Container',
            image=ecs.ContainerImage.from_asset(
                directory='../MLflow/container',
                repository_name=container_repo_name
            ),
            # Environment variables
            environment={
                'BUCKET': f's3://{artifact_bucket.bucket_name}',
                'HOST': database.db_instance_endpoint_address,
                'PORT': str(port),
                'DATABASE': db_name,
                'USERNAME': username
            },
            # Secret containing the RDS MySQL password
            secrets={
                'PASSWORD': ecs.Secret.from_secrets_manager(db_password_secret)
            }
        )
        # Port mapping to expose the MLflow container
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id='MLFLOW',
            service_name=service_name,
            cluster=cluster,
            task_definition=task_definition
        )

        # Security group ingress rule
        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4('0.0.0.0/0'),
            connection=ec2.Port.tcp(5000),
            description='Allow inbound for mlflow'
        )

        # Auto scaling policy for the load-balanced service
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id='AUTOSCALING',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60)
        )
     
        core.CfnOutput(scope=self, id='LoadBalancerDNS', value=fargate_service.load_balancer.load_balancer_dns_name)
Example #26
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        # This resource alone will create a private/public subnet in each AZ as well as nat/internet gateway(s)
        self.vpc = aws_ec2.Vpc(
            self,
            "BaseVPC",
            cidr='10.0.0.0/24',
        )

        # Creating ECS Cluster in the VPC created above
        self.ecs_cluster = aws_ecs.Cluster(self,
                                           "ECSCluster",
                                           vpc=self.vpc,
                                           cluster_name="container-demo")

        # Adding service discovery namespace to cluster
        self.ecs_cluster.add_default_cloud_map_namespace(name="service")

        ###### EC2 SPOT CAPACITY PROVIDER SECTION ######

        ## As of today, AWS CDK doesn't support Launch Templates on the AutoScaling construct, hence it
        ## doesn't support Mixed Instances Policy to combine instance types on Auto Scaling and adhere to Spot best practices
        ## In the meantime, CfnLaunchTemplate and CfnAutoScalingGroup resources are used to configure Spot capacity
        ## https://github.com/aws/aws-cdk/issues/6734

        self.ecs_spot_instance_role = aws_iam.Role(
            self,
            "ECSSpotECSInstanceRole",
            assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonEC2ContainerServiceforEC2Role"),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonEC2RoleforSSM")
            ])

        self.ecs_spot_instance_profile = aws_iam.CfnInstanceProfile(
            self,
            "ECSSpotInstanceProfile",
            roles=[self.ecs_spot_instance_role.role_name])

        ## This creates a Launch Template for the Auto Scaling group
        self.lt = aws_ec2.CfnLaunchTemplate(
            self,
            "ECSEC2SpotCapacityLaunchTemplate",
            launch_template_data={
                "instanceType":
                "m5.large",
                "imageId":
                aws_ssm.StringParameter.value_for_string_parameter(
                    self,
                    "/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id"
                ),
                "securityGroupIds": [
                    x.security_group_id
                    for x in self.ecs_cluster.connections.security_groups
                ],
                "iamInstanceProfile": {
                    "arn": self.ecs_spot_instance_profile.attr_arn
                },
                #
                ## Here we configure the ECS agent to drain Spot Instances upon catching a Spot Interruption notice from instance metadata
                "userData":
                core.Fn.base64(
                    core.Fn.sub(
                        "#!/usr/bin/bash\n"
                        "echo ECS_CLUSTER=${cluster_name} >> /etc/ecs/ecs.config\n"
                        "sudo iptables --insert FORWARD 1 --in-interface docker+ --destination 169.254.169.254/32 --jump DROP\n"
                        "sudo service iptables save\n"
                        "echo ECS_ENABLE_SPOT_INSTANCE_DRAINING=true >> /etc/ecs/ecs.config\n"
                        "echo ECS_AWSVPC_BLOCK_IMDS=true >> /etc/ecs/ecs.config\n"
                        "cat /etc/ecs/ecs.config",
                        variables={
                            "cluster_name": self.ecs_cluster.cluster_name
                        }))
            },
            launch_template_name="ECSEC2SpotCapacityLaunchTemplate")

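        # Mixed-instances Auto Scaling group spread across the private subnets,
        # running 100% Spot (on-demand base and percentage are both 0) with the
        # capacity-optimized allocation strategy over ten instance types.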
        self.ecs_ec2_spot_mig_asg = aws_autoscaling.CfnAutoScalingGroup(
            self,
            "ECSEC2SpotCapacity",
            min_size="0",
            max_size="10",
            vpc_zone_identifier=[
                x.subnet_id for x in self.vpc.private_subnets
            ],
            mixed_instances_policy={
                "instancesDistribution": {
                    "onDemandAllocationStrategy": "prioritized",
                    "onDemandBaseCapacity": 0,
                    "onDemandPercentageAboveBaseCapacity": 0,
                    "spotAllocationStrategy": "capacity-optimized"
                },
                "launchTemplate": {
                    "launchTemplateSpecification": {
                        "launchTemplateId": self.lt.ref,
                        "version": self.lt.attr_default_version_number
                    },
                    "overrides": [{
                        "instanceType": "m5.large"
                    }, {
                        "instanceType": "m5d.large"
                    }, {
                        "instanceType": "m5a.large"
                    }, {
                        "instanceType": "m5ad.large"
                    }, {
                        "instanceType": "m5n.large"
                    }, {
                        "instanceType": "m5dn.large"
                    }, {
                        "instanceType": "m3.large"
                    }, {
                        "instanceType": "m4.large"
                    }, {
                        "instanceType": "t3.large"
                    }, {
                        "instanceType": "t2.large"
                    }]
                }
            })
        #
        core.Tag.add(self.ecs_ec2_spot_mig_asg, "Name",
                     self.ecs_ec2_spot_mig_asg.node.path)
        core.CfnOutput(self,
                       "EC2SpotAutoScalingGroupName",
                       value=self.ecs_ec2_spot_mig_asg.ref,
                       export_name="EC2SpotASGName")
        #
        ##### END EC2 SPOT CAPACITY PROVIDER SECTION #####

        # Namespace details as CFN output
        self.namespace_outputs = {
            'ARN':
            self.ecs_cluster.default_cloud_map_namespace.
            private_dns_namespace_arn,
            'NAME':
            self.ecs_cluster.default_cloud_map_namespace.
            private_dns_namespace_name,
            'ID':
            self.ecs_cluster.default_cloud_map_namespace.
            private_dns_namespace_id,
        }

        # Cluster Attributes
        self.cluster_outputs = {
            'NAME': self.ecs_cluster.cluster_name,
            'SECGRPS': str(self.ecs_cluster.connections.security_groups)
        }

        # When enabling EC2, we need the security groups "registered" to the cluster for imports in other service stacks
        if self.ecs_cluster.connections.security_groups:
            self.cluster_outputs['SECGRPS'] = str([
                x.security_group_id
                for x in self.ecs_cluster.connections.security_groups
            ][0])

        # Frontend service to backend services on 3000
        self.services_3000_sec_group = aws_ec2.SecurityGroup(
            self,
            "FrontendToBackendSecurityGroup",
            allow_all_outbound=True,
            description=
            "Security group for frontend service to talk to backend services",
            vpc=self.vpc)

        # Allow inbound 3000 from ALB to Frontend Service
        self.sec_grp_ingress_self_3000 = aws_ec2.CfnSecurityGroupIngress(
            self,
            "InboundSecGrp3000",
            ip_protocol='TCP',
            source_security_group_id=self.services_3000_sec_group.
            security_group_id,
            from_port=3000,
            to_port=3000,
            group_id=self.services_3000_sec_group.security_group_id)

        # Creating an EC2 bastion host to perform load test on private backend services
        amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM,
            storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role/profile that will be attached to the ec2 instance
        # Enabling service role so the EC2 service can use ssm
        role = aws_iam.Role(
            self,
            "InstanceSSM",
            assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"))

        # Attaching the SSM policy to the role so we can use SSM to ssh into the ec2 instance
        role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Reading user data, to install siege into the ec2 instance.
        with open("stresstool_user_data.sh") as f:
            user_data = f.read()

        # Instance creation
        self.instance = aws_ec2.Instance(
            self,
            "Instance",
            instance_name="{}-stresstool".format(stack_name),
            instance_type=aws_ec2.InstanceType("t3.medium"),
            machine_image=amzn_linux,
            vpc=self.vpc,
            role=role,
            user_data=aws_ec2.UserData.custom(user_data),
            security_group=self.services_3000_sec_group)

        # All Outputs required for other stacks to build
        core.CfnOutput(self,
                       "NSArn",
                       value=self.namespace_outputs['ARN'],
                       export_name="NSARN")
        core.CfnOutput(self,
                       "NSName",
                       value=self.namespace_outputs['NAME'],
                       export_name="NSNAME")
        core.CfnOutput(self,
                       "NSId",
                       value=self.namespace_outputs['ID'],
                       export_name="NSID")
        core.CfnOutput(self,
                       "FE2BESecGrp",
                       value=self.services_3000_sec_group.security_group_id,
                       export_name="SecGrpId")
        core.CfnOutput(self,
                       "ECSClusterName",
                       value=self.cluster_outputs['NAME'],
                       export_name="ECSClusterName")
        core.CfnOutput(self,
                       "ECSClusterSecGrp",
                       value=self.cluster_outputs['SECGRPS'],
                       export_name="ECSSecGrpList")
        core.CfnOutput(self,
                       "ServicesSecGrp",
                       value=self.services_3000_sec_group.security_group_id,
                       export_name="ServicesSecGrp")
        core.CfnOutput(self,
                       "StressToolEc2Id",
                       value=self.instance.instance_id)
        core.CfnOutput(self,
                       "StressToolEc2Ip",
                       value=self.instance.instance_private_ip)
Example #27
    def create_ecs_cluster(self, vpc, asg):
        cluster = ecs.Cluster(self, 'GhostCluster', vpc=vpc)
        cluster.add_auto_scaling_group(asg)
        return cluster

    def __init__(self, scope: core.Construct, construct_id: str,
                 identifier: str, user_arn: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        bucket = aws_s3.Bucket(
            self,
            id=f"flow-storage-bucket-{identifier}",
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        cache_bucket = aws_s3.Bucket(
            self,
            id=f"flow-cache-bucket-{identifier}",
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        vpc = aws_ec2.Vpc(
            self,
            id=f"bakery-vpc-{identifier}",
            cidr="10.0.0.0/16",
            enable_dns_hostnames=True,
            enable_dns_support=True,
            nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    name="PublicSubnet1",
                    subnet_type=aws_ec2.SubnetType.PUBLIC)
            ],
            max_azs=3,
        )
        security_group = aws_ec2.SecurityGroup(
            self,
            id=f"security-group-{identifier}",
            vpc=vpc,
        )
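        # Open the default Dask scheduler/dashboard ports (8786/8787) to the world and
        # allow all TCP between members of the group (the task role below is also
        # granted access to the dask-ecs* log groups).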
        security_group.add_ingress_rule(aws_ec2.Peer.any_ipv4(),
                                        aws_ec2.Port.tcp_range(8786, 8787))
        security_group.add_ingress_rule(aws_ec2.Peer.any_ipv6(),
                                        aws_ec2.Port.tcp_range(8786, 8787))
        security_group.add_ingress_rule(security_group, aws_ec2.Port.all_tcp())
        cluster = aws_ecs.Cluster(
            self,
            id=f"bakery-cluster-{identifier}",
            vpc=vpc,
        )

        ecs_task_role = aws_iam.Role(
            self,
            id=f"prefect-ecs-task-role-{identifier}",
            assumed_by=aws_iam.ServicePrincipal(
                service="ecs-tasks.amazonaws.com"),
        )
        ecs_task_role.add_to_policy(
            aws_iam.PolicyStatement(
                resources=["*"],
                actions=[
                    "iam:ListRoleTags",
                ],
            ))
        ecs_task_role.add_to_policy(
            aws_iam.PolicyStatement(
                resources=[
                    f"arn:aws:logs:{self.region}:{self.account}:log-group:dask-ecs*"
                ],
                actions=[
                    "logs:GetLogEvents",
                ],
            ))
        bucket.grant_read_write(ecs_task_role)
        cache_bucket.grant_read_write(ecs_task_role)

        bucket_user = aws_iam.User.from_user_arn(
            self,
            id=f"prefect-bucket-user-{identifier}",
            user_arn=user_arn,
        )

        cache_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:*Object"],
                resources=[f"{cache_bucket.bucket_arn}/*"],
                principals=[bucket_user],
            ))

        cache_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=[cache_bucket.bucket_arn],
                principals=[bucket_user],
            ))

        bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:*Object"],
                resources=[f"{bucket.bucket_arn}/*"],
                principals=[bucket_user],
            ))

        bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=[bucket.bucket_arn],
                principals=[bucket_user],
            ))

        ecs_task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonECS_FullAccess"))

        prefect_ecs_agent_task_definition = aws_ecs.FargateTaskDefinition(
            self,
            id=f"prefect-ecs-agent-task-definition-{identifier}",
            cpu=512,
            memory_limit_mib=2048,
            task_role=ecs_task_role,
        )

        runner_token_secret = aws_ecs.Secret.from_secrets_manager(
            secret=aws_secretsmanager.Secret.from_secret_arn(
                self,
                id=f"prefect-cloud-runner-token-{identifier}",
                secret_arn=os.environ["RUNNER_TOKEN_SECRET_ARN"],
            ),
            field="RUNNER_TOKEN",
        )

        prefect_ecs_agent_task_definition.add_container(
            id=f"prefect-ecs-agent-task-container-{identifier}",
            image=aws_ecs.ContainerImage.from_registry(
                os.environ["BAKERY_IMAGE"]),
            port_mappings=[
                aws_ecs.PortMapping(container_port=8080, host_port=8080)
            ],
            logging=aws_ecs.LogDriver.aws_logs(stream_prefix="ecs-agent"),
            environment={
                "PREFECT__CLOUD__AGENT__LABELS":
                os.environ["PREFECT__CLOUD__AGENT__LABELS"]
            },
            secrets={"PREFECT__CLOUD__AGENT__AUTH_TOKEN": runner_token_secret},
            command=[
                "prefect",
                "agent",
                "ecs",
                "start",
                "--agent-address",
                "http://:8080",
                "--cluster",
                cluster.cluster_arn,
                "--task-role-arn",
                ecs_task_role.role_arn,
            ],
        )

        prefect_ecs_agent_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            id=f"prefect-ecs-agent-service-{identifier}",
            assign_public_ip=True,
            platform_version=aws_ecs.FargatePlatformVersion.LATEST,
            desired_count=1,
            task_definition=prefect_ecs_agent_task_definition,
            cluster=cluster,
            propagate_tags=aws_ecs.PropagatedTagSource.SERVICE,
        )

        prefect_ecs_agent_service.target_group.configure_health_check(
            path="/api/health", port="8080")

        ecs_task_execution_role = aws_iam.Role(
            self,
            id=f"prefect-ecs-task-execution-role-{identifier}",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy"),
            ],
        )

        core.CfnOutput(
            self,
            id=f"prefect-task-role-arn-output-{identifier}",
            export_name=f"prefect-task-role-arn-output-{identifier}",
            value=ecs_task_role.role_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-cluster-arn-output-{identifier}",
            export_name=f"prefect-cluster-arn-output-{identifier}",
            value=cluster.cluster_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-storage-bucket-name-output-{identifier}",
            export_name=f"prefect-storage-bucket-name-output-{identifier}",
            value=bucket.bucket_name,
        )

        core.CfnOutput(
            self,
            id=f"prefect-cache-bucket-name-output-{identifier}",
            export_name=f"prefect-cache-bucket-name-output-{identifier}",
            value=cache_bucket.bucket_name,
        )

        core.CfnOutput(
            self,
            id=f"prefect-task-execution-role-arn-output-{identifier}",
            export_name=f"prefect-task-execution-role-arn-output-{identifier}",
            value=ecs_task_execution_role.role_arn,
        )

        core.CfnOutput(
            self,
            id=f"prefect-security-group-output-{identifier}",
            export_name=f"prefect-security-group-output-{identifier}",
            value=security_group.security_group_id,
        )

        core.CfnOutput(
            self,
            id=f"prefect-vpc-output-{identifier}",
            export_name=f"prefect-vpc-output-{identifier}",
            value=vpc.vpc_id,
        )
Example #29
    def __init__(self, scope: core.Construct, construct_id: str, *,
                 secrets: List[Secret]):
        super().__init__(scope, construct_id)

        vpc = aws_ec2.Vpc(
            self,
            "Vpc",
            enable_dns_support=True,
            enable_dns_hostnames=True,
            max_azs=3,
            nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    name="Public", subnet_type=aws_ec2.SubnetType.PUBLIC)
            ],
        )

        postgres_volume_name = "duckbot_dbdata"
        file_system = aws_efs.FileSystem(
            self,
            "PostgresFileSystem",
            vpc=vpc,
            encrypted=True,
            file_system_name=postgres_volume_name,
            removal_policy=core.RemovalPolicy.DESTROY)
        file_system.node.default_child.override_logical_id(
            "FileSystem"
        )  # rename for compatibility with legacy cloudformation template

        task_definition = aws_ecs.TaskDefinition(
            self,
            "TaskDefinition",
            compatibility=aws_ecs.Compatibility.EC2,
            family="duckbot",
            memory_mib="960",
            network_mode=aws_ecs.NetworkMode.BRIDGE)

        postgres_data_path = "/data/postgres"
        postgres = task_definition.add_container(
            "postgres",
            container_name="postgres",
            image=aws_ecs.ContainerImage.from_registry("postgres:13.2"),
            essential=False,
            environment={
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "PGDATA": postgres_data_path,
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "pg_isready", "-U", "duckbot"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(5),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
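        # Persist Postgres data across tasks: expose the EFS file system as a task
        # volume and mount it at PGDATA inside the postgres container.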
        task_definition.add_volume(
            name=postgres_volume_name,
            efs_volume_configuration=aws_ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id, root_directory="/"))
        postgres.add_mount_points(
            aws_ecs.MountPoint(source_volume=postgres_volume_name,
                               container_path=postgres_data_path,
                               read_only=False))

        secrets_as_parameters = {
            # note, parameter version is required by cdk, but does not make it into the template; specify version 1 for simplicity
            x.environment_name:
            aws_ssm.StringParameter.from_secure_string_parameter_attributes(
                self,
                x.environment_name,
                parameter_name=x.parameter_name,
                version=1)
            for x in secrets
        }
        duckbot = task_definition.add_container(
            "duckbot",
            container_name="duckbot",
            essential=True,
            image=aws_ecs.ContainerImage.from_registry(
                self.node.try_get_context("duckbot_image")),
            environment={"STAGE": "prod"},
            secrets={
                k: aws_ecs.Secret.from_ssm_parameter(v)
                for k, v in secrets_as_parameters.items()
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "python", "-m", "duckbot.health"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(10),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
        duckbot.add_link(postgres)

        asg = aws_autoscaling.AutoScalingGroup(
            self,
            "AutoScalingGroup",
            min_capacity=0,
            max_capacity=1,
            desired_capacity=1,
            machine_image=aws_ecs.EcsOptimizedImage.amazon_linux2(),
            instance_type=aws_ec2.InstanceType("t2.micro"),
            key_name="duckbot",  # needs to be created manually
            instance_monitoring=aws_autoscaling.Monitoring.BASIC,
            vpc=vpc,
        )

        asg.connections.allow_to_default_port(file_system)
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(22))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(80))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(443))

        cluster = aws_ecs.Cluster(self,
                                  "Cluster",
                                  cluster_name="duckbot",
                                  vpc=vpc)
        cluster.add_asg_capacity_provider(
            aws_ecs.AsgCapacityProvider(cluster,
                                        "AsgCapacityProvider",
                                        auto_scaling_group=asg),
            can_containers_access_instance_role=True)

        aws_ecs.Ec2Service(
            self,
            "Service",
            service_name="duckbot",
            cluster=cluster,
            task_definition=task_definition,
            desired_count=1,
            min_healthy_percent=0,
            max_healthy_percent=100,
        )
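
For context, the duckbot stack above relies on a few inputs defined earlier in the source: a secrets iterable whose items expose environment_name and parameter_name, a postgres_volume_name, an EFS file_system, and a duckbot_image CDK context value. A minimal sketch of how the secrets list and volume name might look is shown below; SecretSpec and the parameter paths are illustrative assumptions, not the project's actual definitions.

# Hypothetical sketch -- SecretSpec and the parameter paths are assumptions
# made for illustration; the real definitions live earlier in the source.
from typing import List, NamedTuple


class SecretSpec(NamedTuple):
    environment_name: str  # env var name injected into the duckbot container
    parameter_name: str    # SSM SecureString parameter holding the value


secrets: List[SecretSpec] = [
    SecretSpec("DISCORD_TOKEN", "/duckbot/discord-token"),
    SecretSpec("DATABASE_PASSWORD", "/duckbot/database-password"),
]

postgres_volume_name = "duckbot-dbdata"

# The stack's self.node.try_get_context("duckbot_image") expects the image to be
# supplied as CDK context at deploy time, for example:
#   cdk deploy -c duckbot_image=<account>.dkr.ecr.<region>.amazonaws.com/duckbot:latest
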
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        dockerImageAsset = DockerImageAsset(
            self,
            f"{APPLICATION_NAME}",
            directory="../",
            file="Dockerfile",
            exclude=["cdk/node_modules", ".git", "cdk/cdk.out"],
        )

        vpc = ec2.Vpc(self, f"{APPLICATION_NAME}VPC", max_azs=3)
        cluster = ecs.Cluster(self, f"{APPLICATION_NAME}Cluster", vpc=vpc)

        app_task = ecs.FargateTaskDefinition(
            self,
            f"{APPLICATION_NAME}-task",
            family=f"{APPLICATION_NAME}-family",
            cpu=512,
            memory_limit_mib=2048,
        )

        dd_api_key = ssm.StringParameter.value_for_string_parameter(
            self, "/datadog/snyk_demo/dd_api_key", 1)

        DATADOG_AGENT_VARS["DD_API_KEY"] = dd_api_key

        java_service_container = app_task.add_container(
            f"{APPLICATION_NAME}-java-app",
            image=ecs.ContainerImage.from_docker_image_asset(dockerImageAsset),
            essential=True,
            docker_labels={
                "com.datadoghq.ad.instances":
                '[{"host": "%%host%%", "port": 6379}]',
                "com.datadoghq.ad.check_names": '["tomcat"]',
                "com.datadoghq.ad.init_configs": "[{}]",
            },
            environment=APP_ENV_VARS,
            logging=ecs.LogDrivers.firelens(
                options={
                    "Name": "datadog",
                    "Host": "http-intake.logs.datadoghq.com",
                    "TLS": "on",
                    "apikey": dd_api_key,
                    "dd_service": DD_SERVICE,
                    "dd_source": "tomcat",
                    "dd_tags": DD_TAGS,
                    "provider": "ecs",
                }),
        )

        # Datadog agent sidecar: receives APM traces and runs the autodiscovery
        # checks declared in the app container's docker_labels.
        datadog_agent_container = app_task.add_container(
            f"{APPLICATION_NAME}-datadog-agent",
            image=ecs.ContainerImage.from_registry(
                name="datadog/agent:latest"),
            essential=True,
            environment=DATADOG_AGENT_VARS,
        )

        # Port exposure for the containerized app
        java_service_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))

        # Mandatory port exposure for the Datadog agent (8126 is the APM trace intake port)
        datadog_agent_container.add_port_mappings(
            ecs.PortMapping(container_port=8126, host_port=8126))

        app_task_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            id=f"{APPLICATION_NAME}-service",
            service_name=f"{APPLICATION_NAME}",
            cluster=cluster,  # Required
            cpu=512,  # Default is 256
            desired_count=1,  # Default is 1
            task_definition=app_task,
            memory_limit_mib=2048,  # Default is 512
            listener_port=80,
            public_load_balancer=True,
            health_check_grace_period=core.Duration.seconds(120),
        )

        # A Network Load Balancer has no security group of its own, so the
        # service's security group must allow the app port from any IPv4 for
        # health checks and forwarded client traffic to reach the containers.
        app_task_service.service.connections.allow_from_any_ipv4(
            ec2.Port.tcp(8080), f"{APPLICATION_NAME} app inbound")
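
The Datadog example above imports its configuration constants (APPLICATION_NAME, APP_ENV_VARS, DATADOG_AGENT_VARS, DD_SERVICE, DD_TAGS) from elsewhere in the project. A plausible shape for those constants is sketched below; the concrete values and agent settings are assumptions, not the project's actual configuration.

# Illustrative constants only -- the real values are defined elsewhere in the project.
APPLICATION_NAME = "snyk-demo"

DD_SERVICE = "snyk-demo-tomcat"
DD_TAGS = "env:prod,team:demo"

# Environment passed to the Java/Tomcat application container.
APP_ENV_VARS = {
    "DD_SERVICE": DD_SERVICE,
    "DD_ENV": "prod",
}

# Environment for the Datadog agent sidecar; DD_API_KEY is added at synth time
# from the SSM parameter looked up in the stack.
DATADOG_AGENT_VARS = {
    "ECS_FARGATE": "true",               # tell the agent it is running on ECS Fargate
    "DD_APM_ENABLED": "true",            # accept APM traces on port 8126
    "DD_APM_NON_LOCAL_TRAFFIC": "true",  # allow traces from the app container
}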