Example #1
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

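        # The generic TaskDefinition takes cpu/memory as strings; EC2_AND_FARGATE registers the task for both launch types.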
        self.fargate_task_def = aws_ecs.TaskDefinition(
            self,
            "TaskDef",
            compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
            cpu='256',
            memory_mib='512',
        )

        self.container = self.fargate_task_def.add_container(
            "CrystalServiceContainerDef",
            image=aws_ecs.ContainerImage.from_registry(
                "brentley/ecsdemo-crystal"),
            memory_reservation_mib=512,
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix='ecsworkshop-crystal'))

        self.container.add_port_mappings(
            aws_ecs.PortMapping(container_port=3000))

        self.fargate_service = aws_ecs.FargateService(
            self,
            "CrystalFargateService",
            task_definition=self.fargate_task_def,
            cluster=self.base_platform.ecs_cluster,
            security_group=self.base_platform.services_sec_grp,
            desired_count=1,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=self.base_platform.sd_namespace,
                name='ecsdemo-crystal'))
Example #2
    def create_service(self,
                       service_name,
                       family,
                       container_name,
                       environment,
                       command,
                       desired_count=1,
                       cpu="512",
                       memory="1024",
                       max_healthy_percent=200):
        worker_task_def = ecs.TaskDefinition(
            self,
            family,
            cpu=cpu,
            memory_mib=memory,
            compatibility=ecs.Compatibility.FARGATE,
            family=family,
            network_mode=ecs.NetworkMode.AWS_VPC)
        worker_task_def.add_container(
            container_name,
            image=self.image,
            command=[command],
            environment=environment,
            secrets=self.secrets,
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix=family,
                log_retention=RetentionDays.ONE_DAY))
        return ecs.FargateService(
            self,
            service_name,
            service_name=service_name,
            task_definition=worker_task_def,
            cluster=self.cluster,
            desired_count=desired_count,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            max_healthy_percent=max_healthy_percent)
Example #3
def get_fargate_task(scope: core.Construct, construct_id: str, mem_limit: str) -> aws_ecs.ITaskDefinition:
    return aws_ecs.TaskDefinition(
        scope,
        f"{construct_id}-fargate-task",
        compatibility=aws_ecs.Compatibility.FARGATE,
        network_mode=aws_ecs.NetworkMode.AWS_VPC,
        cpu="256",
        memory_mib=mem_limit,
    )
Example #4
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.fargate_task_def = aws_ecs.TaskDefinition(
            self, "TaskDef",
            compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
            cpu='256',
            memory_mib='512',
        )
        
        self.container = self.fargate_task_def.add_container(
            "CrystalServiceContainerDef",
            image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-crystal"),
            memory_reservation_mib=512,
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix='ecsworkshop-crystal'
            ),
            environment={
                "REGION": getenv('AWS_DEFAULT_REGION')
            },
        )
        
        self.container.add_port_mappings(
            aws_ecs.PortMapping(
                container_port=3000
            )
        )

        self.fargate_service = aws_ecs.FargateService(
            self, "CrystalFargateService",
            service_name='ecsdemo-crystal',
            task_definition=self.fargate_task_def,
            cluster=self.base_platform.ecs_cluster,
            security_group=self.base_platform.services_sec_grp,
            desired_count=1,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=self.base_platform.sd_namespace,
                name='ecsdemo-crystal'
            )
        )

        self.fargate_task_def.add_to_task_role_policy(
            aws_iam.PolicyStatement(
                actions=['ec2:DescribeSubnets'],
                resources=['*']
            )
        )
Example #5
def create_ecs(self, vpc, sg_dictionary, repository):

    # Cluster
    cluster = _ecs.Cluster(
        self, 'Cluster',
        cluster_name='DEMO-CLUSTER',
        vpc=vpc
    )

    # Role(task execution)
    execution_role = _iam.Role(
        self, 'ExecutionRole',
        role_name='DEMO-TASK-EXECUTION-ROLE',
        assumed_by=_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    )
    execution_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))

    # TaskDefinition
    task_def = _ecs.TaskDefinition(
        self, 'TaskDefinition',
        compatibility=_ecs.Compatibility.FARGATE,
        cpu='2048',
        memory_mib='8192',
        network_mode=_ecs.NetworkMode.AWS_VPC,
        execution_role=execution_role,
        family='DEMO-TASK',
        task_role=execution_role,
    )

    # Container
    container = task_def.add_container(
        id='DEMO-CONTAINER',
        image=_ecs.ContainerImage.from_ecr_repository(repository),
        logging=_ecs.LogDriver.aws_logs(
            stream_prefix='ecs',
            log_group=_logs.LogGroup(
                self, 'LogGroup',
                log_group_name='/ecs/'+'DEMO-TASK',
                retention=_logs.RetentionDays.INFINITE,
            )
        )
    )
    container.add_port_mappings(_ecs.PortMapping(container_port=8080))
Example #6
  def CreateSVC(self,ZachTaskList,ZachECSNodeName,cluster,vpc,AppendHostFile,ENV_VARS):
    for TaskName, TaskValue in ZachTaskList.items():
      ZachTaskDef = ecs.TaskDefinition(self, id=ZachECSNodeName + "-" + TaskName,compatibility=ecs.Compatibility.EC2,network_mode=ecs.NetworkMode.AWS_VPC)
      core.CfnOutput(self, id=TaskName + "ARN", value=ZachTaskDef.task_definition_arn)
      for num in range(TaskValue.get("num", 1)):
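        # Each replica index adds another container definition to the same task definition.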
        container = ZachTaskDef.add_container(id=ZachECSNodeName + "-" + TaskName + str(num), cpu=1, memory_limit_mib=512,
                                  memory_reservation_mib=256, readonly_root_filesystem=True,
                                  working_directory="/data/web", user='******',
                                  health_check=ecs.HealthCheck(command=["ping 127.0.0.1"],
                                                               interval=core.Duration.seconds(30), retries=5,
                                                               start_period=core.Duration.minutes(1),
                                                               timeout=core.Duration.seconds(10)),
                                  hostname=ZachECSNodeName + "-" + TaskName, extra_hosts=AppendHostFile,
                                  environment=ENV_VARS, docker_labels=ENV_VARS,
                                  image=ecs.ContainerImage.from_registry(TaskValue.get("image", "nginx:latest")),
                                  logging=ecs.LogDrivers.fluentd())
        port_mapping = ecs.PortMapping(
          container_port=TaskValue.get("port", 80),
          host_port=TaskValue.get("port", 80),
          protocol=ecs.Protocol.TCP
        )
        container.add_port_mappings(port_mapping)

        core.CfnOutput(self, id=container.container_name + "ContainPort", value=str(container.container_port))
        core.CfnOutput(self, id=container.container_name + "MemLimit", value=str(container.memory_limit_specified))
        core.CfnOutput(self, id=container.container_name + "HostPort", value=str(port_mapping.host_port))

      svc = ecs.Ec2Service(self, id=ZachECSNodeName+TaskName,
                           task_definition=ZachTaskDef,
                           cluster=cluster,
                           desired_count=2,
                           security_group=self.VPC_SG(TaskName,vpc),
                           assign_public_ip=True,
                           # health_check_grace_period=core.Duration.seconds(30), # Health check grace period is only valid for services configured to use load balancers
                           service_name=ZachECSNodeName+TaskName)
      svc.add_placement_strategies(ecs.PlacementStrategy.spread_across(ecs.BuiltInAttributes.INSTANCE_ID),
                                   ecs.PlacementStrategy.packed_by(ecs.BinPackResource.MEMORY))
      core.CfnOutput(self, id=ZachECSNodeName+TaskName + "ServiceName", value=svc.service_name)
      core.CfnOutput(self, id=ZachECSNodeName+TaskName + "ServiceARN", value=svc.service_arn)

    core.CfnOutput(self, id=ZachECSNodeName+TaskName + "ARN", value=cluster.cluster_arn)
    core.CfnOutput(self, id=ZachECSNodeName+TaskName + "VPCID", value=str(cluster.vpc.vpc_id))
    core.CfnOutput(self, id=ZachECSNodeName+TaskName + "VPCZone", value=str(cluster.vpc.availability_zones))
Example #7
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, home_base: str,
                 desired_count: int = 1,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        core.Tags.of(self).add('home_base', home_base)

        definition = ecs.TaskDefinition(self,
                                        'DefaultTask',
                                        compatibility=ecs.Compatibility.EC2,
                                        cpu='128',
                                        memory_mib='128',
                                        task_role=infra.task_role,
                                        execution_role=infra.execution_role,
                                        network_mode=ecs.NetworkMode.AWS_VPC)

        definition.add_container(
            'DefaultContainer',
            memory_reservation_mib=128,
            image=infra.container,
            logging=ecs.AwsLogDriver(
                stream_prefix='rtsp-connector/{}'.format(home_base),
                log_group=infra.log_group),
            environment={
                'BUCKET': infra.bucket.bucket_name,
                'FRAME_ANALYZED_TOPIC': infra.frameAnalyzed.topic_arn,
                'REK_COLLECT_ID': 'homenet-hybrid-collection',
                'REGION': core.Stack.of(self).region,
            })

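        # Run the connector as an EC2 service in the shared cluster, using the infra security group and subnet group.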
        ecs.Ec2Service(self,
                       'RtspConnectorService',
                       service_name='{}-rtsp-connector-{}'.format(
                           infra.landing_zone.zone_name, home_base),
                       task_definition=definition,
                       assign_public_ip=False,
                       cluster=infra.cluster,
                       deployment_controller=ecs.DeploymentController(
                           type=ecs.DeploymentControllerType.ECS),
                       security_group=infra.security_group,
                       vpc_subnets=ec2.SubnetSelection(
                           subnet_group_name=infra.subnet_group_name),
                       desired_count=desired_count)
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # EC2 Vpc construct

        vpc = ec2.Vpc(
            self,
            id="cromwell_server_vpc",
            max_azs=2
        )

        # ECS Cluster construct
        cluster = ecs.Cluster(
            self,
            id="cromwell_cluster",
            vpc=vpc
        )

        # IAM roles
        ecstaskexecutionrole = iam.Role.from_role_arn(
            self,
            "ecstaskexecutionrole",
            role_arn="arn:aws:iam::562965587442:role/ecsTaskExecutionRole"
        )

        batch_service_role = iam.Role.from_role_arn(
            self,
            "batchservicerole",
            role_arn="arn:aws:iam::562965587442:role/AWSBatchServiceRole"
        )

        fargate_cromwell_role = iam.Role.from_role_arn(
            self,
            "fargate_cromwell_role",
            role_arn="arn:aws:iam::562965587442:role/fargate_cromwell_role"
        )

        # Cromwell docker image from ECR
        container_img = ecr.Repository.from_repository_name(
            self,
            "cromwell_docker_image",
            repository_name=CROMWELL_REPOSITORY_NAME
        )

        # ECS task definition construct
        task_def = ecs.TaskDefinition(
            self,
            "cromwell_server_task",
            execution_role=ecstaskexecutionrole,
            task_role=fargate_cromwell_role,
            compatibility=ecs.Compatibility.FARGATE,
            cpu="1024",
            memory_mib="4096"
        )

        # ECS container definition construct
        container_def = ecs.ContainerDefinition(
            self,
            "cromwell_container",
            task_definition=task_def,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=container_img,
                tag=CROMWELL_IMAGE_TAG
            ),
            command=["bash", "run_cromwell_server.sh"],
            cpu=1,
            health_check=None,
            working_directory='/',
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="cromwell_logs",
                datetime_format=None,
                log_group=None,
                log_retention=None,
                multiline_pattern=None
            )
        )
        container_def.add_port_mappings(
            ecs.PortMapping(
                container_port=CROMWELL_PORT_NUMBER,
                host_port=CROMWELL_PORT_NUMBER,
                protocol=ecs.Protocol.TCP
            )
        )

        # EC2 Security Group construct
        security_group = ec2.SecurityGroup(
            self,
            "cromwell_server_security_group",
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name="cromwell_server_security_group",
            description="This is the security group assigned to the cromwell server running as a Fargate service.",
        )
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                from_port=CROMWELL_PORT_NUMBER,
                                to_port=CROMWELL_PORT_NUMBER,
                                string_representation="cromwell_server_port"),
            remote_rule=None
        )

        # ECS Fargate Service construct
        service = ecs.FargateService(
            self,
            "cromwell_service",
            task_definition=task_def,
            cluster=cluster,
            service_name="cromwell_server_service",
            assign_public_ip=True,
            desired_count=1,
            security_group=security_group
        )


        # Batch resources
        # Reference:
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html
        # with open("lib/aws_batch_launch_template_user_data.txt", 'r') as user_data_file:
        #     user_data = user_data_file.read()
        #
        # ec2_user_data = ec2.UserData.custom(content=user_data)
        # ec2_instance =ec2.Instance(
        #     self,
        #     "ec2Instance",
        #     instance_type=ec2.InstanceType("t2.small"),
        #     machine_image=ec2.AmazonLinuxImage(),
        #     vpc=vpc,
        #     user_data=ec2_user_data
        # )

        # launch_template_data = core.CfnResource(
        #     self,
        #     "cromwell_launch_template_data",
        #     type="AWS::EC2::LaunchTemplate.LaunchTemplateData",
        #     properties={
        #         "UserData": user_data
        #     }
        # )
        #
        # launch_template = ec2.CfnLaunchTemplate(
        #     self,
        #     "cromwell_launch_template",
        #     launch_template_name="cromwell_launch_template",
        #     launch_template_data=launch_template_data,
        # )
        #
        # compute_resources = core.CfnResource(
        #     self,
        #     "cromwell_compute_resources",
        #     type="AWS::Batch::ComputeEnvironment.ComputeResources",
        #     properties={
        #       "DesiredvCpus": 256,
        #       "Ec2KeyPair": "genovic-qc-eddev",
        #       "InstanceRole": "arn:aws:iam::562965587442:role/ecsInstanceRole",
        #       "InstanceTypes": ["optimal"],
        #       "LaunchTemplate": launch_template.launch_template_name,
        #       "MaxvCpus": 256,
        #       "MinvCpus": 0,
        #       "SecurityGroupIds": [vpc.vpc_default_security_group],
        #       "Subnets": [subnet.subnet_id for subnet in vpc.public_subnets],
        #       "Tags": "cromwell_compute_resource",
        #       "Type": "EC2"
        #     }
        # )
        #
        # compute_env = batch.CfnComputeEnvironment(
        #     self,
        #     "cromwell_compute_env",
        #     service_role=batch_service_role,
        #     compute_environment_name="cromwell_compute_env",
        #     type="MANAGED",
        #     state="ENABLED",
        #     compute_resources=compute_resources
        # )
        #
        # queue = batch.CfnJobQueue(
        #     self,
        #     "cromwell_queue",
        #     compute_environment_order=compute_env,
        #     priority=1,
        #     job_queue_name="cromwell_queue",
        #     state="ENABLED"
        # )
        #
        # core.CfnOutput(
        #     self,
        #     "cromwell_queue_name",
        #     value=queue.job_queue_name
        # )

        core.CfnOutput(
            self,
            "FargateCromwellServiceArn",
            value=service.service_arn
        )
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # CONTAINER_IMAGE = 'daskdev/dask:0.19.4'
        # if use_rapids:
        #   CONTAINER_IMAGE = 'rapidsai/rapidsai:latest'

        # if use_notebook:
        #   CONTAINER_IMAGE = 'daskdev/dask-notebook:latest'

        #TODO : Create ECR repository
        #Update: not required since ecs.ContainerImage.from_asset already builds and pushes the image

        #ecr = aws_ecr.Repository(self, 'MyECR', repository_name='dask')
        # not needed if you use an asset like below:

        # from_asset builds and pushes the local Dockerfile; build_args expects a
        # mapping of --build-arg values, so the docker CLI flags are dropped here
        dockercontainer = ecs.ContainerImage.from_asset(
            directory='dockerstuff')

        # Create vpc
        vpc = ec2.Vpc(self, 'MyVpc', max_azs=3)  # default is all AZs in region
        subnets = vpc.private_subnets

        # Create log groups for the scheduler and workers
        s_logs = logs.LogGroup(self, 'SlogGroup', log_group_name='SlogGroup')
        w_logs = logs.LogGroup(self, 'WlogGroup', log_group_name='WlogGroup')

        #Create private namespace
        #nspace = sd.PrivateDnsNamespace(self, 'MyNamespace', vpc=vpc, name='local-dask')

        # Create role for ECS
        nRole = iam_.Role(self,
                          'ECSExecutionRole',
                          assumed_by=iam_.ServicePrincipal('ecs-tasks'))

        nPolicy = iam_.Policy(
            self,
            "ECSExecutionPolicy",
            policy_name="ECSExecutionPolicy",
            statements=[
                iam_.PolicyStatement(actions=[
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                    'ecr:GetAuthorizationToken', 'logs:CreateLogStream',
                    'logs:PutLogEvents', 'sagemaker:*', 's3:*'
                ],
                                     resources=[
                                         '*',
                                     ]),
            ]).attach_to_role(nRole)

        # Create ECS cluster
        cluster = ecs.Cluster(self,
                              'DaskCluster',
                              vpc=vpc,
                              cluster_name='Fargate-Dask-Cluster')

        nspace = cluster.add_default_cloud_map_namespace(
            name='local-dask', type=sd.NamespaceType.DNS_PRIVATE, vpc=vpc)

        #TO DO: Use default namespace for cluster and use cmap options within fargate service
        #Update: done

        # schedulerRegistry = sd.Service(self,'serviceRegistryScheduler',
        #     namespace=nspace,dns_ttl=core.Duration.seconds(60),
        #     custom_health_check=sd.HealthCheckCustomConfig(failure_threshold=10),
        #     name='Dask-Scheduler')

        # # schedulerRegistry.register_ip_instance(id='serviceRegistryScheduler',ipv4='')

        # workerRegistry = sd.Service(self,'workerRegistryScheduler',
        #     namespace=nspace,dns_ttl=core.Duration.seconds(60),
        #     custom_health_check=sd.HealthCheckCustomConfig(failure_threshold=10),
        #     name='Dask-Worker')

        # -------------------- Add scheduler task ------------------------
        schedulerTask = ecs.TaskDefinition(
            self,
            'taskDefinitionScheduler',
            compatibility=ecs.Compatibility.FARGATE,
            cpu='4096',
            memory_mib='8192',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Dask-Scheduler',
            task_role=nRole)

        schedulerTask.add_container('MySchedulerImage',
                                    image=dockercontainer,
                                    command=['dask-scheduler'],
                                    cpu=4096,
                                    essential=True,
                                    logging=ecs.LogDriver.aws_logs(
                                        stream_prefix='ecs', log_group=s_logs),
                                    memory_limit_mib=8192,
                                    memory_reservation_mib=8192)

        # -------------------- Add worker task -----------------------------
        workerTask = ecs.TaskDefinition(
            self,
            'taskDefinitionWorker',
            compatibility=ecs.Compatibility.FARGATE,
            cpu='4096',
            memory_mib='8192',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Dask-Worker',
            task_role=nRole)

        workerTask.add_container(
            'MyWorkerImage',
            image=dockercontainer,
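            # Workers reach the scheduler through its Cloud Map name in the 'local-dask' private namespace.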
            command=[
                'dask-worker', 'dask-scheduler.local-dask:8786',
                '--memory-limit 1800MB', '--worker-port 9000',
                '--nanny-port 9001', '--bokeh-port 9002'
            ],
            cpu=4096,
            essential=True,
            logging=ecs.LogDriver.aws_logs(stream_prefix='ecs',
                                           log_group=w_logs),
            memory_limit_mib=8192,
            memory_reservation_mib=8192)

        # Task security group
        sg = ec2.SecurityGroup(self,
                               'MySG',
                               vpc=vpc,
                               description='Enable Scheduler ports access',
                               security_group_name='DaskSecurityGroup')

        # add_ingress_rule expects an IPeer; use the static Peer.ipv4 factory
        # TO DO: restrict the source from any ipv4 to the security group itself
        p1 = ec2.Peer.ipv4('0.0.0.0/0')
        p2 = ec2.Peer.ipv4('0.0.0.0/0')

        sg.add_ingress_rule(peer=p1,
                            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                                string_representation='p1',
                                                from_port=8786,
                                                to_port=8789))

        sg.add_ingress_rule(peer=p2,
                            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                                string_representation='p2',
                                                from_port=9000,
                                                to_port=9002))

        # ----------------- Add Scheduler Service -----------------------

        # deployconfig = ecs.CfnService.DeploymentConfigurationProperty(maximum_percent=200,minimum_healthy_percent=100)

        # vpcconfig = ecs.CfnService.AwsVpcConfigurationProperty(subnets = subnets,assign_public_ip=True,security_groups=[sg])

        # networkconfig = ecs.CfnService.NetworkConfigurationProperty(awsvpc_configuration=vpcconfig)

        # schedulerService = ecs.CfnService(self, 'DaskSchedulerService',
        #     task_definition = schedulerTask, deployment_configuration=deployconfig,
        #     cluster=cluster, desired_count=1, enable_ecs_managed_tags=None,
        #     launch_type='FARGATE',network_configuration=networkconfig,
        #     service_registries=schedulerRegistry)

        #ecs.CfnService.ServiceRegistryProperty()

        # Try fargate service? No service registry option available
        #using default cluster namespace
        cmap1 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                    failure_threshold=10,
                                    name='Dask-Scheduler')

        schedulerService = ecs.FargateService(
            self,
            'DaskSchedulerService',
            task_definition=schedulerTask,
            assign_public_ip=True,
            security_group=sg,
            #vpc_subnets=subnets,
            cluster=cluster,
            desired_count=1,
            max_healthy_percent=200,
            min_healthy_percent=100,
            service_name='Dask-Scheduler',
            cloud_map_options=cmap1)

        # schedulerService.enable_cloud_map(name = 'serviceRegistryScheduler')
        # schedulerRegistry.register_non_ip_instance(self,instance_id='DaskSchedulerService')

        # ----------------- Add Worker Service -----------------------
        #using default cluster namespace
        cmap2 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                    failure_threshold=10,
                                    name='Dask-Worker')

        workerService = ecs.FargateService(
            self,
            'DaskWorkerService',
            task_definition=workerTask,
            assign_public_ip=True,
            security_group=sg,
            #vpc_subnets=subnets,
            cluster=cluster,
            desired_count=1,
            max_healthy_percent=200,
            min_healthy_percent=100,
            service_name='Dask-Worker',
            cloud_map_options=cmap2)

        # workerService.enable_cloud_map(name = 'workerRegistryScheduler')

        #------------------------------------------------------------------------

        # Very less control with ECS patterns, did not work

        # ecs_patterns.ApplicationLoadBalancedFargateService(self, "DaskFargateStack",
        #     cluster=cluster,            # Required
        #     cpu=512,                    # Default is 256
        #     desired_count=6,            # Default is 1
        #     task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
        #         image=ecs.ContainerImage.from_registry(CONTAINER_IMAGE)),
        #     memory_limit_mib=2048,      # Default is 512
        #     public_load_balancer=True)  # Default is False

        # Start a notebook in the same vpc
        # print(type(sg.security_group_id))
        # print("------------------------------")
        # print(subnets[0].subnet_id)
        #Create role for Notebook instance
        smRole = iam_.Role(self,
                           "notebookAccessRole",
                           assumed_by=iam_.ServicePrincipal('sagemaker'))

        smPolicy = iam_.Policy(self,
                               "notebookAccessPolicy",
                               policy_name="notebookAccessPolicy",
                               statements=[
                                   iam_.PolicyStatement(
                                       actions=['s3:*', 'ecs:*'],
                                       resources=[
                                           '*',
                                       ]),
                               ]).attach_to_role(smRole)

        notebook = sagemaker_.CfnNotebookInstance(
            self,
            'DaskNotebook',
            instance_type='ml.t2.medium',
            volume_size_in_gb=50,
            security_group_ids=[sg.security_group_id],
            subnet_id=subnets[0].subnet_id,
            notebook_instance_name='DaskNotebook',
            role_arn=smRole.role_arn,
            root_access='Enabled',
            direct_internet_access='Enabled',
            default_code_repository=
            'https://github.com/w601sxs/dask-examples.git')
Example #10
    def __init__(self, scope: core.Construct, construct_id: str, *,
                 secrets: List[Secret]):
        super().__init__(scope, construct_id)

        vpc = aws_ec2.Vpc(
            self,
            "Vpc",
            enable_dns_support=True,
            enable_dns_hostnames=True,
            max_azs=3,
            nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    name="Public", subnet_type=aws_ec2.SubnetType.PUBLIC)
            ],
        )

        postgres_volume_name = "duckbot_dbdata"
        file_system = aws_efs.FileSystem(
            self,
            "PostgresFileSystem",
            vpc=vpc,
            encrypted=True,
            file_system_name=postgres_volume_name,
            removal_policy=core.RemovalPolicy.DESTROY)
        file_system.node.default_child.override_logical_id(
            "FileSystem"
        )  # rename for compatibility with legacy cloudformation template

        task_definition = aws_ecs.TaskDefinition(
            self,
            "TaskDefinition",
            compatibility=aws_ecs.Compatibility.EC2,
            family="duckbot",
            memory_mib="960",
            network_mode=aws_ecs.NetworkMode.BRIDGE)

        postgres_data_path = "/data/postgres"
        postgres = task_definition.add_container(
            "postgres",
            container_name="postgres",
            image=aws_ecs.ContainerImage.from_registry("postgres:13.2"),
            essential=False,
            environment={
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "PGDATA": postgres_data_path,
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "pg_isready", "-U", "duckbot"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(5),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
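        # Persist the Postgres data directory on EFS so database state survives task replacement.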
        task_definition.add_volume(
            name=postgres_volume_name,
            efs_volume_configuration=aws_ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id, root_directory="/"))
        postgres.add_mount_points(
            aws_ecs.MountPoint(source_volume=postgres_volume_name,
                               container_path=postgres_data_path,
                               read_only=False))

        secrets_as_parameters = {
            # note, parameter version is required by cdk, but does not make it into the template; specify version 1 for simplicity
            x.environment_name:
            aws_ssm.StringParameter.from_secure_string_parameter_attributes(
                self,
                x.environment_name,
                parameter_name=x.parameter_name,
                version=1)
            for x in secrets
        }
        duckbot = task_definition.add_container(
            "duckbot",
            container_name="duckbot",
            essential=True,
            image=aws_ecs.ContainerImage.from_registry(
                self.node.try_get_context("duckbot_image")),
            environment={"STAGE": "prod"},
            secrets={
                k: aws_ecs.Secret.from_ssm_parameter(v)
                for k, v in secrets_as_parameters.items()
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "python", "-m", "duckbot.health"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(10),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
        duckbot.add_link(postgres)

        asg = aws_autoscaling.AutoScalingGroup(
            self,
            "AutoScalingGroup",
            min_capacity=0,
            max_capacity=1,
            desired_capacity=1,
            machine_image=aws_ecs.EcsOptimizedImage.amazon_linux2(),
            instance_type=aws_ec2.InstanceType("t2.micro"),
            key_name="duckbot",  # needs to be created manually
            instance_monitoring=aws_autoscaling.Monitoring.BASIC,
            vpc=vpc,
        )

        asg.connections.allow_to_default_port(file_system)
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(22))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(80))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(443))

        cluster = aws_ecs.Cluster(self,
                                  "Cluster",
                                  cluster_name="duckbot",
                                  vpc=vpc)
        cluster.add_asg_capacity_provider(
            aws_ecs.AsgCapacityProvider(cluster,
                                        "AsgCapacityProvider",
                                        auto_scaling_group=asg),
            can_containers_access_instance_role=True)

        aws_ecs.Ec2Service(
            self,
            "Service",
            service_name="duckbot",
            cluster=cluster,
            task_definition=task_definition,
            desired_count=1,
            min_healthy_percent=0,
            max_healthy_percent=100,
        )
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        parse_image_list_file = aws_lambda.Function(
            self,
            'parse_image_list_file',
            handler='parse_image_list_file.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('parse_image_list_file'),
            memory_size=10240,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        list_objects = aws_lambda.Function(
            self,
            'list_objects',
            handler='list_objects.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('list_objects'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        get_size_and_store = aws_lambda.Function(
            self,
            'get_size_and_store',
            handler='get_size_and_store.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('get_size_and_store'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        images_bucket = aws_s3.Bucket(self, "images_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
            resources=["*"])

        parse_image_list_file.add_to_role_policy(
            lambda_supplemental_policy_statement)
        list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
        get_size_and_store.add_to_role_policy(
            lambda_supplemental_policy_statement)

        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        # notification_topic = aws_sns.Topic(self, "notification_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(parse_image_list_file))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        comprehend_queue_iqueue = aws_sqs.Queue(self,
                                                "comprehend_queue_iqueue")
        comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=comprehend_queue_iqueue)
        comprehend_queue = aws_sqs.Queue(
            self,
            "comprehend_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=comprehend_queue_iqueue_dlq)

        rekognition_queue_iqueue = aws_sqs.Queue(self,
                                                 "rekognition_queue_iqueue")
        rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=rekognition_queue_iqueue)
        rekognition_queue = aws_sqs.Queue(
            self,
            "rekognition_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=rekognition_queue_dlq)

        object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
        object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                                   queue=object_queue_iqueue)
        object_queue = aws_sqs.Queue(
            self,
            "object_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=object_queue_dlq)

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        get_size_and_store.add_event_source(
            SqsEventSource(object_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################
        s3workflow_domain = aws_elasticsearch.Domain(
            self,
            "s3workflow_domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3workflow_pool = aws_cognito.UserPool(
            self,
            "s3workflow-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON VPC
        ###########################################################################
        vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                          max_azs=3)  # default is all AZs in region

        ###########################################################################
        # AMAZON ECS CLUSTER
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

        ###########################################################################
        # AMAZON ECS Repositories
        ###########################################################################
        rekognition_repository = aws_ecr.Repository(
            self,
            "rekognition_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy.DESTROY)
        comprehend_repository = aws_ecr.Repository(
            self,
            "comprehend_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
            ],
            resources=["*"])
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(
            task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(
            self,
            "task_execution_policy",
            document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(
            self,
            "task_execution_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
                "comprehend:*", "es:*"
            ],
            resources=["*"])
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self,
                                     "task_policy",
                                     document=task_policy_document)
        task_role = aws_iam.Role(
            self,
            "task_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_role.attach_inline_policy(task_policy)

        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        rekognition_task_definition = aws_ecs.TaskDefinition(
            self,
            "rekognition_task_definition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        comprehend_task_definition = aws_ecs.TaskDefinition(
            self,
            "comprehend_task_definition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        ###########################################################################
        # AMAZON ECS Images
        ###########################################################################
        rekognition_ecr_image = aws_ecs.EcrImage(
            repository=rekognition_repository, tag="latest")
        comprehend_ecr_image = aws_ecs.EcrImage(
            repository=comprehend_repository, tag="latest")

        ###########################################################################
        # ENVIRONMENT VARIABLES
        ###########################################################################
        environment_variables = {}
        environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
        environment_variables[
            "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
        environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
        environment_variables[
            "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

        parse_image_list_file.add_environment(
            "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
        parse_image_list_file.add_environment("QUEUEURL",
                                              rekognition_queue.queue_url)
        parse_image_list_file.add_environment("DEBUG", "False")
        parse_image_list_file.add_environment("BUCKET", "-")
        parse_image_list_file.add_environment("KEY", "-")

        list_objects.add_environment("QUEUEURL", object_queue.queue_url)
        list_objects.add_environment("ELASTICSEARCH_HOST",
                                     s3workflow_domain.domain_endpoint)
        list_objects.add_environment("S3_BUCKET_NAME",
                                     images_bucket.bucket_name)
        list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
        list_objects.add_environment("S3_BUCKET_SUFFIX", "")
        list_objects.add_environment("LOGGING_LEVEL", "INFO")

        get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
        get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                           s3workflow_domain.domain_endpoint)
        get_size_and_store.add_environment("S3_BUCKET_NAME",
                                           images_bucket.bucket_name)
        get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
        get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
        get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

        ###########################################################################
        # ECS Log Drivers
        ###########################################################################
        rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays.ONE_DAY)
        comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # ECS Task Definitions
        ###########################################################################
        rekognition_task_definition.add_container(
            "rekognition_task_definition",
            image=rekognition_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=rekognition_task_log_driver)

        comprehend_task_definition.add_container(
            "comprehend_task_definition",
            image=comprehend_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=comprehend_task_log_driver)

        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(
            self,
            "hosted_zone",
            zone_name="s3workflow.com",
            comment="private hosted zone for s3workflow system")
        hosted_zone.add_vpc(vpc)
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Build and push faropt container
        # from_asset expects build_args as a mapping of --build-arg values,
        # so the docker CLI flags are dropped here
        dockercontainer = ecs.ContainerImage.from_asset(
            directory='Dockerstuff')

        # Create vpc
        vpc = ec2.Vpc(self, 'MyVpc', max_azs=3)  # default is all AZs in region
        subnets = vpc.private_subnets

        # Create log groups for workers
        w_logs = logs.LogGroup(self,
                               'faroptlogGroup',
                               log_group_name='faroptlogGroup')

        # Create role for ECS
        nRole = iam.Role(self,
                         'ECSExecutionRole',
                         assumed_by=iam.ServicePrincipal('ecs-tasks'))

        nPolicy = iam.Policy(
            self,
            "ECSExecutionPolicy",
            policy_name="ECSExecutionPolicy",
            statements=[
                iam.PolicyStatement(actions=[
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                    'ecr:GetAuthorizationToken', 'logs:CreateLogStream',
                    'logs:PutLogEvents', 'sagemaker:*', 's3:*',
                    'cloudwatch:PutMetricData'
                ],
                                    resources=[
                                        '*',
                                    ]),
            ]).attach_to_role(nRole)

        # Create ECS cluster
        cluster = ecs.Cluster(self,
                              'FarOptCluster',
                              vpc=vpc,
                              cluster_name='FarOptCluster')

        nspace = cluster.add_default_cloud_map_namespace(
            name='local-faropt', type=sd.NamespaceType.DNS_PRIVATE, vpc=vpc)

        # create s3 bucket

        s3 = _s3.Bucket(self, "s3bucket")
        s3async = _s3.Bucket(self, "s3async")

        # DynamoDB tables for job and recipe metadata
        pkey1 = ddb.Attribute(name='jobid', type=ddb.AttributeType.STRING)
        jobtable = ddb.Table(self,
                             "FaroptJobTable",
                             table_name='FaroptJobTable',
                             partition_key=pkey1)  #,

        pkey2 = ddb.Attribute(name='recipeid', type=ddb.AttributeType.STRING)
        recipetable = ddb.Table(self,
                                "FaroptRecipeTable",
                                table_name='FaroptRecipeTable',
                                partition_key=pkey2)  #,
        # billing_mode=None, encryption=None, encryption_key=None,
        # point_in_time_recovery=None, read_capacity=None, removal_policy=None,
        # replication_regions=None, server_side_encryption=None,
        # sort_key=None, stream=None, time_to_live_attribute=None, write_capacity=None)

        # -------------------- Add worker task ------------------------

        faroptTask = ecs.TaskDefinition(
            self,
            'taskDefinitionScheduler',
            cpu='4096',
            memory_mib='16384',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Faropt-Scheduler',
            task_role=nRole,
            compatibility=ecs.Compatibility.FARGATE)

        faroptTask.add_container('FarOptImage',
                                 image=dockercontainer,
                                 cpu=4096,
                                 memory_limit_mib=16384,
                                 memory_reservation_mib=16384,
                                 environment={'s3bucket': s3.bucket_name},
                                 logging=ecs.LogDriver.aws_logs(
                                     stream_prefix='faroptlogs',
                                     log_group=w_logs))

        # ------------------------------------------------------
        # Try to trigger a fargate task from Lambda on S3 trigger

        # create lambda function
        function = _lambda.Function(self,
                                    "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="lambda-handler.main",
                                    code=_lambda.Code.asset("./lambda"),
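                                    # These values let the handler launch the Fargate task (ecs:RunTask) when objects land in the bucket.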
                                    environment={
                                        'cluster_name': cluster.cluster_name,
                                        'launch_type': 'FARGATE',
                                        'task_definition':
                                        faroptTask.to_string(),
                                        'task_family': faroptTask.family,
                                        'subnet1': subnets[0].subnet_id,
                                        'subnet2': subnets[-1].subnet_id,
                                        'bucket': s3.bucket_name
                                    },
                                    initial_policy=[
                                        iam.PolicyStatement(actions=[
                                            'ecs:RunTask',
                                            'ecs:PutAccountSetting', 's3:*',
                                            'iam:PassRole'
                                        ],
                                                            resources=['*'])
                                    ])

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)

        # Lambda opt function with layer

        # 1- create layer

        layercode2 = _lambda.Code.from_asset(
            path="./layers/orblacknp.zip")  # adding np to the layer
        layer2 = _lambda.LayerVersion(self, id="layer2", code=layercode2)

        # 2- create function
        function2 = _lambda.Function(self,
                                     "lambda_function2",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="lambda-handler.main",
                                     code=_lambda.Code.asset("./lambda2"),
                                     environment={
                                         'cluster_name': cluster.cluster_name,
                                         'launch_type': 'FARGATE',
                                         'task_definition':
                                         faroptTask.to_string(),
                                         'task_family': faroptTask.family,
                                         'subnet1': subnets[0].subnet_id,
                                         'subnet2': subnets[-1].subnet_id,
                                         'bucket': s3.bucket_name
                                     },
                                     timeout=core.Duration.seconds(900),
                                     memory_size=10240,
                                     layers=[layer2],
                                     initial_policy=[
                                         iam.PolicyStatement(actions=[
                                             'ecs:RunTask',
                                             'ecs:PutAccountSetting', 's3:*',
                                             'iam:PassRole',
                                             'cloudwatch:PutMetricData'
                                         ],
                                                             resources=['*'])
                                     ])

        # Lambda API resolver with faropt layer
        # 1- create layer

        layercode3 = _lambda.Code.from_asset(
            path="./layers/faroptlayer.zip")  # adding np to the layer
        layer3 = _lambda.LayerVersion(self, id="layer3", code=layercode3)

        # 2- create function
        function3 = _lambda.Function(
            self,
            "lambda_function3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda-handler.lambda_handler",
            code=_lambda.Code.asset("./lambda3"),
            environment={
                'cluster_name': cluster.cluster_name,
                'launch_type': 'FARGATE',
                'task_definition': faroptTask.to_string(),
                'task_family': faroptTask.family,
                'subnet1': subnets[0].subnet_id,
                'subnet2': subnets[-1].subnet_id,
                'bucket': s3.bucket_name
            },
            timeout=core.Duration.seconds(120),
            memory_size=2048,
            layers=[layer3],
            initial_policy=[
                iam.PolicyStatement(actions=[
                    'ecs:RunTask', 'ecs:PutAccountSetting', 's3:*',
                    'iam:PassRole', 'cloudwatch:PutMetricData', 'ecr:*',
                    'dynamodb:*', "cloudformation:Describe*",
                    "cloudformation:Get*", "cloudformation:List*",
                    "logs:CreateLogStream", "logs:PutLogEvents"
                ],
                                    resources=['*'])
            ])

        # OUTPUTS
        core.CfnOutput(self,
                       's3output',
                       value=s3.bucket_name,
                       export_name='bucket')
        core.CfnOutput(self,
                       'jobtable',
                       value=jobtable.table_name,
                       export_name='jobtable')
        core.CfnOutput(self,
                       'recipetable',
                       value=recipetable.table_name,
                       export_name='recipetable')
        core.CfnOutput(self,
                       's3asyncoutput',
                       value=s3async.bucket_name,
                       export_name='asyncbucket')
        core.CfnOutput(self,
                       'lambdaopt',
                       value=function2.function_name,
                       export_name='lambdaopt')
Beispiel #13
0
    def CreateSVC(self,
                  ZachTaskList,
                  ZachECSNodeName,
                  cluster,
                  vpc,
                  AppendHostFile,
                  ENV_VARS,
                  choice="ELB"):
        for TaskName, TaskValue in ZachTaskList.items():
            ZachTaskDef = ecs.TaskDefinition(
                self,
                id=ZachECSNodeName + "-" + TaskName,
                compatibility=ecs.Compatibility.EC2,
                network_mode=ecs.NetworkMode.AWS_VPC)
            core.CfnOutput(self,
                           id=TaskName + "-ARN",
                           value=ZachTaskDef.task_definition_arn)
            for num in range(TaskValue.get("num", 1)):
                container = ZachTaskDef.add_container(
                    id=ZachECSNodeName + "-" + TaskName + str(num),
                    cpu=1,
                    memory_limit_mib=512,
                    memory_reservation_mib=256,
                    readonly_root_filesystem=True,
                    working_directory="/data/web",
                    user='******',
                    health_check=ecs.HealthCheck(
                        command=["ping 127.0.0.1"],
                        interval=core.Duration.seconds(30),
                        retries=5,
                        start_period=core.Duration.minutes(1),
                        timeout=core.Duration.seconds(10)),
                    hostname=ZachECSNodeName + "-" + TaskName,
                    extra_hosts=AppendHostFile,
                    environment=ENV_VARS,
                    docker_labels=ENV_VARS,
                    image=ecs.ContainerImage.from_registry(
                        TaskValue.get("image", "nginx:latest")),
                    logging=ecs.LogDrivers.fluentd())
                port_mapping = ecs.PortMapping(
                    container_port=TaskValue.get("port", 80),
                    host_port=TaskValue.get("port", 80),
                    protocol=ecs.Protocol.TCP)
                container.add_port_mappings(port_mapping)

                core.CfnOutput(self,
                               id=container.container_name + "-ContainPort",
                               value=str(container.container_port))
                core.CfnOutput(self,
                               id=container.container_name + "-MemLimit",
                               value=str(container.memory_limit_specified))
                core.CfnOutput(self,
                               id=container.container_name + "-HostPort",
                               value=str(port_mapping.host_port))

            svc = self.ELB_SVC(ZachECSNodeName, TaskName, ZachTaskDef, cluster,
                               vpc) if choice == "ELB" else self.NetworkLBSVC(
                                   ZachECSNodeName, TaskName, cluster)

        core.CfnOutput(self,
                       id=ZachECSNodeName + "-ARN",
                       value=cluster.cluster_arn)
        core.CfnOutput(self,
                       id=ZachECSNodeName + "-VPCID",
                       value=str(cluster.vpc.vpc_id))
        core.CfnOutput(self,
                       id=ZachECSNodeName + "-VPCZone",
                       value=str(cluster.vpc.availability_zones))
Beispiel #14
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # If left unchecked this pattern could "fan out" on the transform and load
        # lambdas to the point that it consumes all resources on the account. This is
        # why we are limiting concurrency to 2 on these lambdas. Feel free to raise this.
        lambda_throttle_size = 2

        ####
        # DynamoDB Table
        # This is where our transformed data ends up
        ####
        table = dynamo_db.Table(self,
                                "TransformedData",
                                partition_key=dynamo_db.Attribute(
                                    name="id",
                                    type=dynamo_db.AttributeType.STRING))

        ####
        # S3 Landing Bucket
        # This is where the user uploads the file to be transformed
        ####
        bucket = s3.Bucket(self, "LandingBucket")

        ####
        # Queue that listens for S3 Bucket events
        ####
        queue = sqs.Queue(self,
                          'newObjectInLandingBucketEventQueue',
                          visibility_timeout=core.Duration.seconds(300))

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      s3n.SqsDestination(queue))

        # EventBridge Permissions
        event_bridge_put_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=['*'],
            actions=['events:PutEvents'])

        ####
        # Fargate ECS Task Creation to pull data from S3
        #
        # Fargate is used here because if you had a seriously large file,
        # you could stream the data to fargate for as long as needed before
        # putting the data onto eventbridge or up the memory/storage to
        # download the whole file. Lambda has limitations on runtime and
        # memory/storage
        ####
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        logging = ecs.AwsLogDriver(stream_prefix='TheEventBridgeETL',
                                   log_retention=logs.RetentionDays.ONE_WEEK)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        task_definition = ecs.TaskDefinition(
            self,
            'FargateTaskDefinition',
            memory_mib="512",
            cpu="256",
            compatibility=ecs.Compatibility.FARGATE)

        # We need to give our fargate container permission to put events on our EventBridge
        task_definition.add_to_task_role_policy(event_bridge_put_policy)
        # Grant fargate container access to the object that was uploaded to s3
        bucket.grant_read(task_definition.task_role)

        container = task_definition.add_container(
            'AppContainer',
            image=ecs.ContainerImage.from_asset(
                'container/s3DataExtractionTask'),
            logging=logging,
            environment={
                'S3_BUCKET_NAME': bucket.bucket_name,
                'S3_OBJECT_KEY': ''
            })
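
        # The image built from container/s3DataExtractionTask is not shown
        # here. A rough Python sketch of the extraction loop it is expected to
        # run: it streams the object and emits one event per line so the rules
        # below can match (the 'data' field name is an assumption):
        #
        #   import json
        #   import os
        #   import boto3
        #
        #   s3_client = boto3.client('s3')
        #   events_client = boto3.client('events')
        #   body = s3_client.get_object(Bucket=os.environ['S3_BUCKET_NAME'],
        #                               Key=os.environ['S3_OBJECT_KEY'])['Body']
        #   for line in body.iter_lines():
        #       events_client.put_events(Entries=[{
        #           'Source': 'cdkpatterns.the-eventbridge-etl',
        #           'DetailType': 's3RecordExtraction',
        #           'Detail': json.dumps({'status': 'extracted',
        #                                 'data': line.decode('utf-8')})}])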

        ####
        # Lambdas
        #
        # These are used for 4 phases:
        #
        # Extract    - kicks off the ECS Fargate task to download the data and splinter it into EventBridge events
        # Transform  - takes the two comma separated strings and produces a json object
        # Load       - inserts the data into dynamodb
        # Observe    - This is a lambda that subscribes to all events and logs them centrally
        ####

        subnet_ids = []
        for subnet in vpc.private_subnets:
            subnet_ids.append(subnet.subnet_id)

        ####
        # Extract
        # defines an AWS Lambda resource to trigger our fargate ecs task
        ####
        extract_lambda = _lambda.Function(
            self,
            "extractLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="s3SqsEventConsumer.handler",
            code=_lambda.Code.from_asset("lambdas/extract"),
            reserved_concurrent_executions=lambda_throttle_size,
            environment={
                "CLUSTER_NAME": cluster.cluster_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
                "SUBNETS": json.dumps(subnet_ids),
                "CONTAINER_NAME": container.container_name
            })
        queue.grant_consume_messages(extract_lambda)
        extract_lambda.add_event_source(_event.SqsEventSource(queue=queue))
        extract_lambda.add_to_role_policy(event_bridge_put_policy)

        run_task_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[task_definition.task_definition_arn],
            actions=['ecs:RunTask'])
        extract_lambda.add_to_role_policy(run_task_policy_statement)
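        # ecs:RunTask alone is not enough: the caller must also be allowed to
        # pass the task role and execution role referenced by the task
        # definition, hence the iam:PassRole statement below.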

        task_execution_role_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[
                task_definition.obtain_execution_role().role_arn,
                task_definition.task_role.role_arn
            ],
            actions=['iam:PassRole'])
        extract_lambda.add_to_role_policy(task_execution_role_policy_statement)

        ####
        # Transform
        # defines a lambda to transform the data that was extracted from s3
        ####

        transform_lambda = _lambda.Function(
            self,
            "TransformLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="transform.handler",
            code=_lambda.Code.from_asset("lambdas/transform"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))
        transform_lambda.add_to_role_policy(event_bridge_put_policy)

        # Create EventBridge rule to route extraction events
        transform_rule = events.Rule(
            self,
            'transformRule',
            description='Data extracted from S3, needs to be transformed',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['s3RecordExtraction'],
                detail={"status": ["extracted"]}))
        transform_rule.add_target(
            targets.LambdaFunction(handler=transform_lambda))

        ####
        # Load
        # load the transformed data in dynamodb
        ####

        load_lambda = _lambda.Function(
            self,
            "LoadLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="load.handler",
            code=_lambda.Code.from_asset("lambdas/load"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3),
            environment={"TABLE_NAME": table.table_name})
        load_lambda.add_to_role_policy(event_bridge_put_policy)
        table.grant_read_write_data(load_lambda)

        load_rule = events.Rule(
            self,
            'loadRule',
            description='Data transformed, needs to be loaded into DynamoDB',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['transform'],
                detail={"status": ["transformed"]}))
        load_rule.add_target(targets.LambdaFunction(handler=load_lambda))

        ####
        # Observe
        # Watch for all cdkpatterns.the-eventbridge-etl events and log them centrally
        ####

        observe_lambda = _lambda.Function(
            self,
            "ObserveLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="observe.handler",
            code=_lambda.Code.from_asset("lambdas/observe"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))

        observe_rule = events.Rule(
            self,
            'observeRule',
            description='all events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl']))

        observe_rule.add_target(targets.LambdaFunction(handler=observe_lambda))
Beispiel #15
0
    def __init__(self, scope: core.Construct, id: str, sg_id: str,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        # Networking Constructs

        vpc = ec2.Vpc.from_lookup(self, "vpc", is_default=True)

        security_group = ec2.SecurityGroup.from_security_group_id(
            self, "sg", security_group_id=sg_id)

        # ECS Constructs

        cluster = ecs.Cluster(self,
                              "cluster",
                              cluster_name="FargateCluster",
                              vpc=vpc)

        taskdef = ecs.TaskDefinition(self,
                                     "blue-task-definition",
                                     compatibility=ecs.Compatibility.FARGATE,
                                     family="sample",
                                     network_mode=ecs.NetworkMode.AWS_VPC,
                                     memory_mib="512",
                                     cpu="256")

        taskdef.add_container(
            "blue",
            image=ecs.ContainerImage.from_registry("kovvuri/training:blue"),
            essential=True,
        ).add_port_mappings(
            ecs.PortMapping(container_port=80,
                            host_port=80,
                            protocol=ecs.Protocol.TCP))

        # Load Balancer Constructs

        alb = elb.ApplicationLoadBalancer(
            self,
            "alb",
            security_group=security_group,
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets),
            vpc=vpc)

        blue = elb.ApplicationTargetGroup(self,
                                          "blue",
                                          target_group_name="swap1",
                                          port=80,
                                          protocol=elb.Protocol.HTTP,
                                          target_type=elb.TargetType.IP,
                                          vpc=vpc)

        green = elb.ApplicationTargetGroup(self,
                                           "green",
                                           target_group_name="swap2",
                                           port=80,
                                           protocol=elb.Protocol.HTTP,
                                           target_type=elb.TargetType.IP,
                                           vpc=vpc)

        alb.add_listener("80_listener",
                         default_target_groups=[blue],
                         protocol=elb.Protocol.HTTP,
                         port=80)

        alb.add_listener(
            "3000_listener",
            default_target_groups=[green],
            protocol=elb.Protocol.HTTP,
            port=3000,
        )

        # ECS Service

        service = ecs.FargateService(
            self,
            "service",
            cluster=cluster,
            task_definition=taskdef,
            desired_count=1,
            platform_version=ecs.FargatePlatformVersion.VERSION1_3,
            deployment_controller=ecs.DeploymentController(
                type=ecs.DeploymentControllerType.CODE_DEPLOY),
            assign_public_ip=False,
            enable_ecs_managed_tags=True,
            propagate_task_tags_from=ecs.PropagatedTagSource.TASK_DEFINITION,
            security_group=security_group,
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.private_subnets))

        service.attach_to_application_target_group(target_group=blue)

        CustomCodeDeploy.EcsDeploymentGroup(
            self,
            "DeploymentGroup",
            ecs_service=service.service_name,
            ecs_cluster=cluster.cluster_name,
            production_target_group=blue.target_group_name,
            test_target_group=green.target_group_name,
            production_port=80,
            test_port=3000)
Beispiel #16
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create ecr repo
        repository = aws_ecr.Repository(
            self,
            "dbfit",
            image_scan_on_push=True,
            repository_name="dbfit",
        )

        # add image to repo
        dockerImageAsset = aws_ecr_assets.DockerImageAsset(
            self,
            "dbfitImg",
            directory="./",
            repository_name="dbfit",
            exclude=['node_modules', '.git', 'cdk.out'])
        # create task definition
        task_definition = aws_ecs.TaskDefinition(
            self,
            "dbfittask",
            memory_mib="512",
            cpu="256",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE)
        # get image from ecr repo
        container_image = aws_ecs.ContainerImage.from_docker_image_asset(
            dockerImageAsset)

        # add container to task definition
        container = task_definition.add_container("dbfit-container",
                                                  image=container_image,
                                                  memory_reservation_mib=512,
                                                  cpu=256)

        # add port mapping to expose to outside world
        port_mapping = aws_ecs.PortMapping(container_port=8085)
        container.add_port_mappings(port_mapping)

        # get vpc reference
        vpc = aws_ec2.Vpc.from_lookup(
            self,
            "VPC",
            # This imports the default VPC but you can also
            # specify a 'vpcName' or 'tags'.
            is_default=True)
        # create cluster
        # Create an ECS cluster
        cluster = aws_ecs.Cluster(self, "Cluster", vpc=vpc)

        # create security group for ecs service
        security_group = aws_ec2.SecurityGroup(
            self,
            "dbfitSG",
            vpc=vpc,
            security_group_name="dbfitGroup",
            allow_all_outbound=True)
        # open 8085 inbound
        security_group.add_ingress_rule(aws_ec2.Peer.ipv4('0.0.0.0/0'),
                                        aws_ec2.Port.tcp(8085),
                                        'DBFit Routing')

        # start fargate service on cluster
        fargate_service = aws_ecs.FargateService(
            self,
            "dbfitservice",
            cluster=cluster,
            task_definition=task_definition,
            assign_public_ip=True,
            security_groups=[security_group])
Beispiel #17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
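
        # http_port, task_def_cpu and task_def_memory_mb are module-level
        # settings defined outside this snippet (for example http_port = 80,
        # task_def_cpu = "256", task_def_memory_mb = "512").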

        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, "EC2Cluster",
            vpc=vpc
        )

        security_group = ec2.SecurityGroup(
            self, "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
        )

        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_tcp(),
            description="Allow all traffic"
        )

        app_target_group = elbv2.ApplicationTargetGroup(
            self, "AppTargetGroup",
            port=http_port,
            vpc=vpc,
            target_type=elbv2.TargetType.IP,
        )

        elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group,
        )

        app_listener = elbv2.ApplicationListener(
            self, "AppListener",
            load_balancer=elastic_loadbalancer,
            port=http_port,
            default_target_groups=[app_target_group],
        )

        task_definition = ecs.TaskDefinition(
            self, "TaskDefenition",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=task_def_cpu,
            memory_mib=task_def_memory_mb,
        )

        container_definition = ecs.ContainerDefinition(
            self, "ContainerDefinition",
            image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
            task_definition=task_definition,
            logging=ecs.AwsLogDriver(
                stream_prefix="DemoContainerLogs",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )

        container_definition.add_port_mappings(
            ecs.PortMapping(
                container_port=http_port,
            )
        )

        fargate_service = ecs.FargateService(
            self, "FargateService",
            task_definition=task_definition,
            cluster=cluster,
            security_group=security_group,
        )

        fargate_service.attach_to_application_target_group(
            target_group=app_target_group,
        )

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=elastic_loadbalancer.load_balancer_dns_name
        )
Beispiel #18
0
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)


        ###########################################################################
        # AMAZON VPC  
        ###########################################################################
        vpc = ec2.Vpc(self, "LoadTestVPC", max_azs=3)     # default is all AZs in region


        ###########################################################################
        # AMAZON ECS Repositories  
        ###########################################################################
        # get_repository = aws_ecs.IRepository(self, "get_repository", image_scan_on_push=True, removal_policy=aws_cdk.core.RemovalPolicy("DESTROY") )
        # put_repository = aws_ecs.IRepository(self, "put_repository", image_scan_on_push=True, removal_policy=aws_cdk.core.RemovalPolicy("DESTROY") )
        get_repository = aws_ecr.Repository(self, "get_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy("DESTROY") )
        put_repository = aws_ecr.Repository(self, "put_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy("DESTROY") )
        xray_repository = aws_ecr.Repository(self, "xray_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy("DESTROY") )


        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################        
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*","ecr:*"],
            resources=["*"]
            )
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(self, "task_execution_policy", document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(self, "task_execution_role", assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com') )
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["logs:*", "xray:*", "sqs:*", "s3:*"],
            resources=["*"]
            )
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self, "task_policy", document=task_policy_document)
        task_role = aws_iam.Role(self, "task_role", assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com') )
        task_role.attach_inline_policy(task_policy)


        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        get_task_definition = aws_ecs.TaskDefinition(self, "gettaskdefinition",
                                                                        compatibility=aws_ecs.Compatibility("FARGATE"), 
                                                                        cpu="1024", 
                                                                        # ipc_mode=None, 
                                                                        memory_mib="2048", 
                                                                        network_mode=aws_ecs.NetworkMode("AWS_VPC"), 
                                                                        # pid_mode=None,                                      #Not supported in Fargate and Windows containers
                                                                        # placement_constraints=None, 
                                                                        execution_role=task_execution_role, 
                                                                        # family=None, 
                                                                        # proxy_configuration=None, 
                                                                        task_role=task_role
                                                                        # volumes=None
                                                                        )

        put_task_definition = aws_ecs.TaskDefinition(self, "puttaskdefinition",
                                                                        compatibility=aws_ecs.Compatibility("FARGATE"), 
                                                                        cpu="1024", 
                                                                        # ipc_mode=None, 
                                                                        memory_mib="2048", 
                                                                        network_mode=aws_ecs.NetworkMode("AWS_VPC"), 
                                                                        # pid_mode=None,                                      #Not supported in Fargate and Windows containers
                                                                        # placement_constraints=None, 
                                                                        execution_role=task_execution_role, 
                                                                        # family=None, 
                                                                        # proxy_configuration=None, 
                                                                        task_role=task_role
                                                                        # volumes=None
                                                                        )


        ###########################################################################
        # AMAZON S3 BUCKETS 
        ###########################################################################
        storage_bucket = aws_s3.Bucket(self, "storage_bucket")


        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        ecs_task_queue_iqueue = aws_sqs.Queue(self, "ecs_task_queue_iqueue_dlq")
        ecs_task_queue_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10, queue=ecs_task_queue_iqueue)
        ecs_task_queue_queue = aws_sqs.Queue(self, "ecs_task_queue_queue", visibility_timeout=core.Duration.seconds(300), dead_letter_queue=ecs_task_queue_queue_dlq)


        ###########################################################################
        # AMAZON ECS Images 
        ###########################################################################
        get_repository_ecr_image = aws_ecs.EcrImage(repository=get_repository, tag="latest")
        put_repository_ecr_image = aws_ecs.EcrImage(repository=put_repository, tag="latest")
        xray_repository_ecr_image = aws_ecs.EcrImage(repository=xray_repository, tag="latest")
        environment_variables = {}
        environment_variables["SQS_QUEUE"] = ecs_task_queue_queue.queue_url
        environment_variables["S3_BUCKET"] = storage_bucket.bucket_name
        
        get_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays("ONE_WEEK"))
        put_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays("ONE_WEEK"))
        xray_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays("ONE_WEEK"))


        get_task_definition.add_container("get_task_definition_get", 
                                                    image=get_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=get_task_log_driver
                                                    )
        get_task_definition.add_container("get_task_definition_xray", 
                                                    image=xray_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=xray_task_log_driver
                                                    )

        put_task_definition.add_container("put_task_definition_put", 
                                                    image=put_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=put_task_log_driver
                                                    )
        put_task_definition.add_container("put_task_definition_xray", 
                                                    image=xray_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=xray_task_log_driver
                                                    )
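
        # The get/put container images are pushed to the repositories above
        # separately; they only receive SQS_QUEUE and S3_BUCKET through the
        # environment. A hedged sketch of what the put side's loop might look
        # like (object key naming and payload size are assumptions):
        #
        #   import os
        #   import uuid
        #   import boto3
        #
        #   s3_client = boto3.client('s3')
        #   sqs_client = boto3.client('sqs')
        #   while True:
        #       key = str(uuid.uuid4())
        #       s3_client.put_object(Bucket=os.environ['S3_BUCKET'],
        #                            Key=key, Body=b'x' * 1024)
        #       sqs_client.send_message(QueueUrl=os.environ['SQS_QUEUE'],
        #                               MessageBody=key)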


        ###########################################################################
        # AMAZON ECS CLUSTER 
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "LoadTestCluster", vpc=vpc)


        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE 
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(self, "hosted_zone", zone_name="loadtest.com", comment="private hosted zone for loadtest system")
        hosted_zone.add_vpc(vpc)
        bucket_record_values = [storage_bucket.bucket_name]
        queue_record_values = [ecs_task_queue_queue.queue_url]
        bucket_record_name = "bucket." + hosted_zone.zone_name
        queue_record_name = "filesqueue." + hosted_zone.zone_name
        hosted_zone_record_bucket = aws_route53.TxtRecord(self, "hosted_zone_record_bucket", record_name=bucket_record_name, values=bucket_record_values, zone=hosted_zone, comment="dns record for bucket name")
        hosted_zone_record_queue = aws_route53.TxtRecord(self, "hosted_zone_record_queue", record_name=queue_record_name, values=queue_record_values, zone=hosted_zone, comment="dns record for queue name")
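
        # The TXT records let anything inside the VPC discover the bucket name
        # and queue URL through DNS rather than configuration. A hedged lookup
        # sketch, assuming the dnspython package is available:
        #
        #   import dns.resolver
        #
        #   bucket = dns.resolver.resolve('bucket.loadtest.com', 'TXT')[0].to_text().strip('"')
        #   queue_url = dns.resolver.resolve('filesqueue.loadtest.com', 'TXT')[0].to_text().strip('"')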