Example #1
    def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.IVpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        e2e_cluster = aws_ecs.Cluster(self,
                                      'e2e-cluster',
                                      vpc=vpc,
                                      cluster_name='e2e-cluster')

        e2e_image = aws_ecr_assets.DockerImageAsset(self,
                                                    'e2e-image',
                                                    directory='test/e2e')

        e2e_task = aws_ecs.FargateTaskDefinition(self,
                                                 'e2e-task',
                                                 family='e2e-task')

        e2e_task.add_container(
            'e2e-test-kafka',
            image=aws_ecs.ContainerImage.from_docker_image_asset(e2e_image),
            logging=aws_ecs.AwsLogDriver(stream_prefix='e2e'))

        e2e_security_group = aws_ec2.SecurityGroup(self, 'e2e', vpc=vpc)
        self.e2e_security_group = e2e_security_group  # expose it so other stacks can grant it access to Kafka

        core.CfnOutput(self,
                       "subnets",
                       value=','.join([
                           subnet.subnet_id for subnet in vpc.private_subnets
                       ]))

        core.CfnOutput(self,
                       "securitygroup",
                       value=e2e_security_group.security_group_id)
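        # Usage sketch (an assumption, not part of the original snippet): a
        # construct that owns the Kafka brokers could admit this security
        # group, for example:
        #
        #   kafka_security_group.add_ingress_rule(
        #       peer=e2e_stack.e2e_security_group,
        #       connection=aws_ec2.Port.tcp(9092),  # default Kafka broker port
        #   )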
Example #2
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: ec2.Vpc,
                 repository: ecr.Repository,
                 shared_context: Dict[str, Any],
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = vpc

        self.model_bucket = s3.Bucket.from_bucket_name(scope=self,
                                                       id=f'{id}-model-bucket',
                                                       bucket_name=shared_context['model_bucket_name'])

        self.ecs_cluster = ecs.Cluster(self,
                                       id=f'{id}-ecs',
                                       cluster_name='serving-ecs',
                                       vpc=self.vpc,
                                       container_insights=True)

        self.task_definition = ecs.FargateTaskDefinition(self,
                                                         id=f'{id}-ecs-task-definition',
                                                         memory_limit_mib=shared_context['fargate_memory_limit_mb'],
                                                         cpu=shared_context['fargate_cpu_units'])

        self.task_definition.add_to_task_role_policy(iam.PolicyStatement(
            actions=['s3:GetObject'],
            effect=iam.Effect.ALLOW,
            resources=[self.model_bucket.bucket_arn, self.model_bucket.bucket_arn + '/*']
        ))

        image = ecs.ContainerImage.from_ecr_repository(repository, 'latest')

        log_driver = ecs.AwsLogDriver(
            stream_prefix=id,
            log_retention=logs.RetentionDays.FIVE_DAYS
        )

        environment = {
            'MODEL_BUCKET_NAME': shared_context['model_bucket_name']
        }

        app_container = self.task_definition.add_container(id=f'{id}-container',
                                                           image=image,
                                                           logging=log_driver,
                                                           environment=environment)

        app_container.add_port_mappings(PortMapping(container_port=shared_context['port'],
                                                    host_port=shared_context['port']))

        self.service = ecs_patterns.ApplicationLoadBalancedFargateService(self,
                                                                          id=f'{id}-fargate-service',
                                                                          assign_public_ip=True,
                                                                          cluster=self.ecs_cluster,
                                                                          desired_count=1,
                                                                          task_definition=self.task_definition,
                                                                          open_listener=True,
                                                                          listener_port=shared_context['port'],
                                                                          target_protocol=ApplicationProtocol.HTTP)
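        # Keys this constructor reads from shared_context (values are
        # illustrative, not taken from the original source):
        #
        #   shared_context = {
        #       'model_bucket_name': 'my-model-bucket',
        #       'fargate_memory_limit_mb': 2048,
        #       'fargate_cpu_units': 1024,
        #       'port': 8080,
        #   }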
Example #3
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the VPC and the ECS Cluster
        # TODO: even when max_azs is specified, only 2 AZs are deployed
        vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
        cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)

        # Create the Task Definition
        task_definition = ecs.FargateTaskDefinition(
            self,
            "ECSDemoTaskDefinition",
            task_role=iam.Role.from_role_arn(
                self, "fargate_task_role",
                "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
            execution_role=iam.Role.from_role_arn(
                self, "fargate_task_execution_role",
                "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole"))

        task_definition.add_volume(name="data")

        # App Container
        app_container = task_definition.add_container(
            "AppContainer",
            image=ecs.ContainerImage.from_ecr_repository(
                ecr.Repository.from_repository_name(
                    self, id="app-file-image", repository_name="app-file")),
            logging=ecs.FireLensLogDriver())

        app_container.add_mount_points(
            ecs.MountPoint(container_path="/data/logs",
                           read_only=False,
                           source_volume="data"))

        # app_container.add_port_mappings(ecs.PortMapping(container_port=80))

        # Log Router
        fluentbit_container = ecs.FirelensLogRouter(
            self,
            "fluentbit_container",
            firelens_config=ecs.FirelensConfig(
                type=ecs.FirelensLogRouterType.FLUENTBIT,
                options=ecs.FirelensOptions(config_file_value="/extra.conf")),
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                ecr.Repository.from_repository_name(
                    self, id="log-router", repository_name="firelens-file")),
            logging=ecs.AwsLogDriver(
                stream_prefix="/ecs/firelens-fluentbit-demo/"))

        fluentbit_container.add_mount_points(
            ecs.MountPoint(container_path="/data/logs",
                           read_only=False,
                           source_volume="data"))
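        # Log flow: the app container writes files under /data/logs on the
        # shared "data" volume and sends stdout/stderr through the FireLens
        # driver to the fluent-bit router; the router's /extra.conf (shipped
        # inside the firelens-file image) decides where logs are forwarded.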
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the VPC for the honeypot(s), default is all AZs in region
        vpc = ec2.Vpc(self, "HoneypotVpc", max_azs=3)

        # Create the ECS cluster where fargate can deploy the Docker containers
        cluster = ecs.Cluster(self, "HoneypotCluster", vpc=vpc)

        # Define task definition for Fargate Service
        task_definition = ecs.FargateTaskDefinition(self,
                                                    "HoneypotTasks",
                                                    cpu=256,
                                                    memory_limit_mib=512)

        # Container definition
        container_definition = ecs.ContainerDefinition(
            self,
            "HoneypotContainerDefinition",
            image=ecs.ContainerImage.from_registry("statixs/cowrie"),
            #image=ecs.ContainerImage.from_asset(directory = "docker"),
            task_definition=task_definition,
            stop_timeout=core.Duration.seconds(2),
            logging=ecs.AwsLogDriver(
                stream_prefix="cowrie",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
        )

        # ECS Security Group definition
        sg_ssh = ec2.SecurityGroup(self,
                                   "honeypot-sg-ssh",
                                   vpc=vpc,
                                   description="Allow SSH to the honeypot")
        sg_ssh.add_ingress_rule(ec2.Peer.ipv4("0.0.0.0/0"), ec2.Port.tcp(22))

        # Fargate service definition
        fargate_service = ecs.FargateService(
            self,
            "HoneypotFargate",
            cluster=cluster,
            assign_public_ip=True,
            desired_count=1,
            security_group=sg_ssh,
            task_definition=task_definition,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4)
Example #5
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, home_base: str,
                 desired_count: int = 1, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        core.Tags.of(self).add('home_base', home_base)
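        # Attributes this construct expects RtspBaseResourcesConstruct to
        # expose (all referenced below): cluster, container, bucket, log_group,
        # task_role, execution_role, security_group, frameAnalyzed (an SNS
        # topic), landing_zone and subnet_group_name.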

        definition = ecs.TaskDefinition(self,
                                        'DefaultTask',
                                        compatibility=ecs.Compatibility.EC2,
                                        cpu='128',
                                        memory_mib='128',
                                        task_role=infra.task_role,
                                        execution_role=infra.execution_role,
                                        network_mode=ecs.NetworkMode.AWS_VPC)

        definition.add_container(
            'DefaultContainer',
            memory_reservation_mib=128,
            image=infra.container,
            logging=ecs.AwsLogDriver(
                stream_prefix='rtsp-connector/{}'.format(home_base),
                log_group=infra.log_group),
            environment={
                'BUCKET': infra.bucket.bucket_name,
                'FRAME_ANALYZED_TOPIC': infra.frameAnalyzed.topic_arn,
                'REK_COLLECT_ID': 'homenet-hybrid-collection',
                'REGION': core.Stack.of(self).region,
            })

        ecs.Ec2Service(self,
                       'RtspConnectorService',
                       service_name='{}-rtsp-connector-{}'.format(
                           infra.landing_zone.zone_name, home_base),
                       task_definition=definition,
                       assign_public_ip=False,
                       cluster=infra.cluster,
                       deployment_controller=ecs.DeploymentController(
                           type=ecs.DeploymentControllerType.ECS),
                       security_group=infra.security_group,
                       vpc_subnets=ec2.SubnetSelection(
                           subnet_group_name=infra.subnet_group_name),
                       desired_count=desired_count)
Example #6
    def __init__(self, scope: core.Stack, id: str, cluster: ecs.ICluster, vpc, private_subnets, sec_group, desired_service_count, **kwargs):
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster
        self.vpc = vpc
        self.private_subnets = private_subnets
        self.sec_group = sec_group

        self.service_discovery = cluster.default_cloud_map_namespace
        self.desired_service_count = desired_service_count


        self.task_definition = ecs.FargateTaskDefinition(
            self, "BackendCrystalServiceTaskDef",
            cpu=256,
            memory_limit_mib=512,
        )

        self.task_definition.add_container(
            "BackendCrystalServiceContainer",
            image=ecs.ContainerImage.from_registry("adam9098/ecsdemo-crystal"),
            logging=ecs.AwsLogDriver(stream_prefix="ecsdemo-crystal", log_retention=logs.RetentionDays.THREE_DAYS),
        )

        self.fargate_service = ecs.FargateService(
            self, "BackendCrystalFargateService",
            service_name="Fargate-Backend-Crystal",
            task_definition=self.task_definition,
            cluster=self.cluster,
            max_healthy_percent=100,
            min_healthy_percent=0,
            vpc_subnets={"subnet_name": "Private"},
            desired_count=self.desired_service_count,
            cloud_map_options={
                "name": "ecsdemo-crystal"
            },
            security_group=self.sec_group
        )
Example #7
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "SqsFargateCdkPythonQueue",
            visibility_timeout=Duration.seconds(300)
        )

        nat_provider = ec2.NatProvider.instance(
            instance_type=ec2.InstanceType("t3.small")
        )

        vpc = ec2.Vpc(self, "SqsFargateCdkPythonVpc", nat_gateway_provider=nat_provider, nat_gateways=1)

        cluster = ecs.Cluster(self, "SqsFargateCdkPythonCluster", vpc=vpc)

        role = iam.Role(self, "SqsFargateCdkPythonRole", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        queue.grant_consume_messages(role)

        fargate_task_definition = ecs.FargateTaskDefinition(self, "SqsFargateCdkPythonFargateTaskDefinition",
                                                            memory_limit_mib=512, cpu=256,
                                                            task_role=role)

        aws_log_driver = ecs.AwsLogDriver(stream_prefix="sqs_fargate_cdk_python")

        fargate_task_definition.add_container("SqsFargateCdkPythonContainer",
                                              image=ecs.ContainerImage.from_asset("./docker"),
                                              environment={"QUEUE_URL": queue.queue_url}, logging=aws_log_driver)

        fargate_service = ecs.FargateService(self, "SqsFargateCdkPythonFargateService", cluster=cluster,
                                             task_definition=fargate_task_definition, desired_count=0)

        auto_scale_task_count = fargate_service.auto_scale_task_count(min_capacity=0, max_capacity=1)
        auto_scale_task_count.scale_on_metric("SqsFargateCdkPythonScaleOnMetric",
                                              metric=queue.metric_approximate_number_of_messages_visible(),
                                              adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
                                              cooldown=Duration.seconds(300),
                                              scaling_steps=[{"upper": 0, "change": -1}, {"lower": 1, "change": +1}])
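        # Scaling behaviour as configured: the service idles at zero tasks
        # (desired_count=0); when ApproximateNumberOfMessagesVisible reaches 1
        # or more the step policy adds a task, and when it drops back to 0 the
        # task is removed, with a 300-second cooldown between adjustments.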
Example #8
  def __build_task(self,
    directory:str,
    repository_name:str,
    entry_point:typing.Optional[typing.List[builtins.str]] = None,
    env_vars:typing.Optional[dict]=None,
    log_group_name:str=None):

    # Avoid a shared mutable default: env_vars is mutated via update() below.
    if env_vars is None:
      env_vars = {}
    if log_group_name is None:
      log_group_name = '/finsurf/'+repository_name

    task_definition = ecs.FargateTaskDefinition(
      self,'FargateTaskDefinition')
    self.tda_secret.grant_read(task_definition.task_role)
    
    image = ecs.ContainerImage.from_docker_image_asset(
      asset=assets.DockerImageAsset(
        self,'DockerAsset',
        directory=path.join(src_root_dir,directory),
        repository_name=repository_name))

    self.log_group = logs.LogGroup(
      self,'LogGroup',
      log_group_name=log_group_name,
      removal_policy=core.RemovalPolicy.DESTROY,
      retention=logs.RetentionDays.TWO_WEEKS)
    
    env_vars.update(self.tda_env_vars)
    task_definition.add_container('DefaultContainer',
      image=image,
      entry_point=entry_point,
      logging= ecs.AwsLogDriver(
        log_group=self.log_group,
        stream_prefix=repository_name,
      ),
      environment=env_vars,
      essential=True)

    self.__task_definition = task_definition
Example #9
    def __init__(
        self, scope: core.Construct, id: str, props: props_type, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)
        ns = SimpleNamespace(**props)

        bucket_name = os.environ.get("BUCKET_NAME")
        dbt_task = ecs.FargateTaskDefinition(
            self,
            "dbt-cdk",
            family="dbt-cdk",
            cpu=512,
            memory_limit_mib=1024,
            task_role=ns.airflow_cluster.airflow_task_role,
            execution_role=ns.airflow_cluster.task_execution_role,
        )
        dbt_task.add_container(
            "dbt-cdk-container",
            image=ecs.ContainerImage.from_ecr_repository(
                ns.ecr.dbt_repo,
                os.environ.get("IMAGE_TAG", "latest"),
            ),
            logging=ecs.AwsLogDriver(
                stream_prefix="ecs", log_group=ns.airflow_cluster.dbt_log_group
            ),
            environment={
                "BUCKET_NAME": bucket_name,
                "REDSHIFT_HOST": ns.redshift.instance.cluster_endpoint.hostname,
            },
            secrets={
                "REDSHIFT_USER": ecs.Secret.from_secrets_manager(
                    ns.redshift.redshift_secret, field="username"
                ),
                "REDSHIFT_PASSWORD": ecs.Secret.from_secrets_manager(
                    ns.redshift.redshift_secret, field="password"
                ),
            },
        )
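        # BUCKET_NAME comes from the deploying shell's environment and must be
        # exported before synth: container environment values must be strings,
        # and os.environ.get() returns None when the variable is unset.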
Example #10
    def __init__(self, scope: core.Stack, id: str, ecs_cluster, vpc,
                 services_3000_sec_group, desired_service_count, **kwargs):
        super().__init__(scope, id, **kwargs)
        self.ecs_cluster = ecs_cluster
        self.vpc = vpc
        self.service_discovery = self.ecs_cluster.default_cloud_map_namespace
        self.services_3000_sec_group = services_3000_sec_group
        self.desired_service_count = desired_service_count

        self.task_definition = aws_ecs.FargateTaskDefinition(
            self,
            "BackendNodeServiceTaskDef",
            cpu=256,
            memory_limit_mib=512,
        )

        self.task_definition.add_container(
            "BackendNodeServiceContainer",
            image=aws_ecs.ContainerImage.from_registry(
                "brentley/ecsdemo-nodejs"),
            logging=aws_ecs.AwsLogDriver(
                stream_prefix="ecsdemo-nodejs",
                log_retention=aws_logs.RetentionDays.THREE_DAYS),
        )

        self.fargate_service = aws_ecs.FargateService(
            self,
            "BackendNodeFargateService",
            service_name="ecsdemo-nodejs",
            task_definition=self.task_definition,
            cluster=self.ecs_cluster,
            max_healthy_percent=100,
            min_healthy_percent=0,
            vpc_subnets={"subnets": self.vpc.private_subnets},
            desired_count=self.desired_service_count,
            cloud_map_options={"name": "ecsdemo-nodejs"},
            security_group=self.services_3000_sec_group,
        )
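        # cloud_map_options registers the service as "ecsdemo-nodejs" in the
        # cluster's default Cloud Map namespace (captured above as
        # self.service_discovery), so other services can reach it via DNS.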
Example #11
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 domain_name: str,
                 identity_provider_client_id: str,
                 identity_provider_client_secret: str,
                 identity_provider_client_url: str,
                 identity_provider_realm: str,
                 identity_provider_scope: str = 'openid',
                 vpc: ec2.IVpc = None,
                 cluster: ecs.ICluster = None,
                 load_balancer: elbv2.IApplicationLoadBalancer = None,
                 log_group: logs.ILogGroup = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        if vpc is None:
            vpc = ec2.Vpc(self, 'ApplicationVpc')

        if cluster is None:
            cluster = ecs.Cluster(self, 'ApplicationCluster', vpc=vpc)

        if log_group is None:
            log_group = logs.LogGroup(
                self,
                'ApplicationLogGroup',
                retention=logs.RetentionDays.ONE_WEEK,
                removal_policy=core.RemovalPolicy.DESTROY)

        application_task_role = iam.Role(
            self,
            'ApplicationTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        application_hosted_zone = route53.HostedZone.from_lookup(
            self, 'ApplicationHostedZone', domain_name=domain_name)

        application_certificate = acm.DnsValidatedCertificate(
            self,
            'FrontendAlbCertificate',
            hosted_zone=application_hosted_zone,
            domain_name='app.' + domain_name)

        application_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'ApplicationLoadBalancedFargateService',
            cluster=cluster,
            load_balancer=load_balancer,
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("application"),
                enable_logging=True,
                log_driver=ecs.AwsLogDriver(stream_prefix='application',
                                            log_group=log_group),
                task_role=application_task_role,
                container_port=8080,
            ),
            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name='app.' + domain_name,
            domain_zone=application_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        application_service.target_group.enable_cookie_stickiness(
            core.Duration.seconds(24 * 60 * 60))
        application_service.target_group.configure_health_check(
            port='8080',
            path='/',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        application_service.listener.add_certificates(
            'ApplicationServiceCertificate',
            certificates=[application_certificate])

        application_service.listener.add_action(
            'DefaultAction',
            action=elbv2.ListenerAction.authenticate_oidc(
                authorization_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/auth',
                token_endpoint=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm + '/protocol/openid-connect/token',
                user_info_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/userinfo',
                issuer=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm,
                client_id=identity_provider_client_id,
                client_secret=core.SecretValue(
                    identity_provider_client_secret),
                scope=identity_provider_scope,
                on_unauthenticated_request=elbv2.UnauthenticatedAction.
                AUTHENTICATE,
                next=elbv2.ListenerAction.forward(
                    [application_service.target_group]),
            ))
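        # Note: the authorization/token/userinfo paths above follow Keycloak's
        # URL layout (/auth/realms/<realm>/protocol/openid-connect/...); a
        # different OIDC provider would need different endpoint URLs.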

        application_service.load_balancer.connections.allow_to_any_ipv4(
            port_range=ec2.Port(
                from_port=443,
                to_port=443,
                protocol=ec2.Protocol.TCP,
                string_representation='Allow ALB to verify token'))
Example #12
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        vpc = _ec2.Vpc(self,
                       "ecs-vpc",
                       cidr="10.0.0.0/16",
                       nat_gateways=1,
                       max_azs=3)

        clusterAdmin = _iam.Role(self,
                                 "AdminRole",
                                 assumed_by=_iam.AccountRootPrincipal())

        cluster = _ecs.Cluster(self, "ecs-cluster", vpc=vpc)

        logging = _ecs.AwsLogDriver(stream_prefix="ecs-logs")

        taskRole = _iam.Role(
            self,
            f"ecs-taskRole-{cdk.Stack.of(self).stack_name}",
            role_name=f"ecs-taskRole-{cdk.Stack.of(self).stack_name}",
            assumed_by=_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        # ECS Constructs

        executionRolePolicy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=['*'],
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage",
                "logs:CreateLogStream", "logs:PutLogEvents"
            ])

        taskDef = _ecs.FargateTaskDefinition(self,
                                             "ecs-taskdef",
                                             task_role=taskRole)

        taskDef.add_to_execution_role_policy(executionRolePolicy)

        container = taskDef.add_container(
            'flask-app',
            image=_ecs.ContainerImage.from_registry(
                "nikunjv/flask-image:blue"),
            memory_limit_mib=256,
            cpu=256,
            logging=logging)

        container.add_port_mappings(
            _ecs.PortMapping(container_port=5000, protocol=_ecs.Protocol.TCP))

        fargateService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "ecs-service",
            cluster=cluster,
            task_definition=taskDef,
            public_load_balancer=True,
            desired_count=3,
            listener_port=80)

        scaling = fargateService.service.auto_scale_task_count(max_capacity=6)

        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=10,
            scale_in_cooldown=cdk.Duration.seconds(300),
            scale_out_cooldown=cdk.Duration.seconds(300))

        # PIPELINE CONSTRUCTS

        # ECR Repo

        ecrRepo = ecr.Repository(self, "EcrRepo")

        gitHubSource = codebuild.Source.git_hub(
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(
                    codebuild.EventAction.PUSH).and_branch_is('main'),
            ])

        # CODEBUILD - project

        project = codebuild.Project(
            self,
            "ECSProject",
            project_name=cdk.Aws.STACK_NAME,
            source=gitHubSource,
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_2,
                privileged=True),
            environment_variables={
                "CLUSTER_NAME": {
                    'value': cluster.cluster_name
                },
                "ECR_REPO_URI": {
                    'value': ecrRepo.repository_uri
                }
            },
            build_spec=codebuild.BuildSpec.from_object({
                'version': "0.2",
                'phases': {
                    'pre_build': {
                        'commands': [
                            'env',
                            'export TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION}'
                        ]
                    },
                    'build': {
                        'commands': [
                            'cd docker-app',
                            'docker build -t $ECR_REPO_URI:$TAG .',
                            '$(aws ecr get-login --no-include-email)',
                            'docker push $ECR_REPO_URI:$TAG'
                        ]
                    },
                    'post_build': {
                        'commands': [
                            'echo "In Post-Build Stage"', 'cd ..',
                            "printf '[{\"name\":\"flask-app\",\"imageUri\":\"%s\"}]' $ECR_REPO_URI:$TAG > imagedefinitions.json",
                            "pwd; ls -al; cat imagedefinitions.json"
                        ]
                    }
                },
                'artifacts': {
                    'files': ['imagedefinitions.json']
                }
            }))

        # PIPELINE ACTIONS

        sourceOutput = codepipeline.Artifact()
        buildOutput = codepipeline.Artifact()

        sourceAction = codepipeline_actions.GitHubSourceAction(
            action_name='GitHub_Source',
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            branch='master',
            oauth_token=cdk.SecretValue.secrets_manager("/my/github/token"),
            output=sourceOutput)

        buildAction = codepipeline_actions.CodeBuildAction(
            action_name='codeBuild',
            project=project,
            input=sourceOutput,
            outputs=[buildOutput])

        manualApprovalAction = codepipeline_actions.ManualApprovalAction(
            action_name='Approve')

        deployAction = codepipeline_actions.EcsDeployAction(
            action_name='DeployAction',
            service=fargateService.service,
            image_file=codepipeline.ArtifactPath(buildOutput,
                                                 'imagedefinitions.json'))

        pipeline = codepipeline.Pipeline(self, "ECSPipeline")

        source_stage = pipeline.add_stage(stage_name="Source",
                                          actions=[sourceAction])

        build_stage = pipeline.add_stage(stage_name="Build",
                                         actions=[buildAction])

        approve_stage = pipeline.add_stage(stage_name="Approve",
                                           actions=[manualApprovalAction])

        deploy_stage = pipeline.add_stage(stage_name="Deploy-to-ECS",
                                          actions=[deployAction])

        ecrRepo.grant_pull_push(project.role)

        project.add_to_role_policy(
            _iam.PolicyStatement(resources=[cluster.cluster_arn],
                                 actions=[
                                     "ecs:DescribeCluster",
                                     "ecr:GetAuthorizationToken",
                                     "ecr:BatchCheckLayerAvailability",
                                     "ecr:BatchGetImage",
                                     "ecr:GetDownloadUrlForLayer"
                                 ]))

        # OUTPUT

        cdk.CfnOutput(
            self,
            "LoadBlancer-DNS",
            value=fargateService.load_balancer.load_balancer_dns_name)
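        # Pipeline flow: GitHub source -> CodeBuild (builds and pushes the
        # image, emitting imagedefinitions.json) -> manual approval -> ECS
        # deploy, which points the Fargate service at the newly pushed image.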
Example #13
    def __init__(self, scope: core.Construct, id: str, region, domain,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC: we need one for the ECS cluster (sadly)
        vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)

        cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)

        # Route53 & SSL Certificate
        zone = dns.HostedZone(self, "dns", zone_name=domain)

        dns.ARecord(self,
                    'MinecraftRecord',
                    zone=zone,
                    record_name='minecraft',
                    target=dns.RecordTarget(values=['1.2.3.4']))

        cert = acm.Certificate(
            self,
            'cert',
            domain_name=f'*.{domain}',
            validation=acm.CertificateValidation.from_dns(zone))

        # ECS ( Cluster, EFS, Task Def)
        fs = efs.FileSystem(self,
                            'EFS',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        task_definition = ecs.FargateTaskDefinition(self,
                                                    'TaskDef',
                                                    memory_limit_mib=4096,
                                                    cpu=1024)

        container = task_definition.add_container(
            'MinecraftDocker',
            image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
            logging=ecs.AwsLogDriver(stream_prefix='Minecraft'),
            cpu=1024,
            memory_limit_mib=4096)
        container.add_mount_points(
            ecs.MountPoint(container_path='/minecraft',
                           source_volume='efs',
                           read_only=False))
        cfn_task = container.task_definition.node.default_child
        cfn_task.add_property_override("Volumes", [{
            "EFSVolumeConfiguration": {
                "FilesystemId": fs.file_system_id
            },
            "Name": "efs"
        }])

        container.add_port_mappings(ecs.PortMapping(container_port=25565))

        sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25565),
                            description='Minecraft Access')
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25575),
                            description='RCONN Access')

        fs.connections.allow_default_port_from(sg)

        subnets = ",".join(vpc.select_subnets().subnet_ids)

        # Cognito ( For ApiGW Authentication)
        userpool = cognito.UserPool(
            self,
            'UserPool',
            user_invitation=cognito.UserInvitationConfig(
                email_body=
                """Hi {username}, you have been invited to our Minecraft server!
                Your temporary password is {####}
                """,
                email_subject="Invitation to Minecraft"))

        # APIGW (Gateway, Lambdas, S3 Static content)

        # Lambda Starter
        starter = _lambda.Function(self,
                                   'Starter',
                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                   handler='index.lambda_handler',
                                   code=_lambda.Code.asset('lambda/starter'),
                                   timeout=core.Duration.seconds(300),
                                   environment={
                                       'cluster': cluster.cluster_name,
                                       'subnets': subnets,
                                       'security_groups': sg.security_group_id,
                                       'task_definition':
                                       task_definition.task_definition_arn,
                                       'region': region,
                                       'zone_id': zone.hosted_zone_id,
                                       'domain': domain
                                   })

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "ecs:ListTasks", "ecs:DescribeTasks",
                                    "ec2:DescribeNetworkInterfaces"
                                ]))
        starter.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[task_definition.task_definition_arn],
                actions=["ecs:RunTask", "ecs:DescribeTasks"]))
        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    task_definition.task_role.role_arn,
                                    task_definition.execution_role.role_arn
                                ],
                                actions=["iam:PassRole"]))

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[zone.hosted_zone_arn],
                                actions=["route53:ChangeResourceRecordSets"]))

        # S3 static webpage
        bucket = s3.Bucket(self,
                           "S3WWW",
                           public_read_access=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           website_index_document="index.html")
        s3d.BucketDeployment(self,
                             "S3Deploy",
                             destination_bucket=bucket,
                             sources=[s3d.Source.asset("static_page")])

        status = _lambda.Function(self,
                                  'Status',
                                  runtime=_lambda.Runtime.PYTHON_3_8,
                                  handler='index.lambda_handler',
                                  code=_lambda.Code.asset('lambda/status'),
                                  environment={
                                      'url': f"https://minecrafter.{domain}",
                                      'domain': domain
                                  })

        # ApiGW
        apigw = api.LambdaRestApi(self,
                                  'ApiGW',
                                  handler=status,
                                  proxy=False,
                                  domain_name={
                                      "domain_name": f'minecrafter.{domain}',
                                      "certificate": cert
                                  },
                                  default_cors_preflight_options={
                                      "allow_origins": api.Cors.ALL_ORIGINS,
                                      "allow_methods": api.Cors.ALL_METHODS
                                  })

        start = apigw.root.add_resource('start')
        start.add_method('ANY', integration=api.LambdaIntegration(starter))

        apigw.root.add_method('ANY')

        dns.ARecord(self,
                    'PointDNSToApiGW',
                    zone=zone,
                    target=dns.RecordTarget.from_alias(
                        targets.ApiGateway(apigw)),
                    record_name=f"minecrafter.{domain}")
Example #14
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        identifier: str,
        public_ip: str,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(
            self,
            f"vpc-{identifier}",
            max_azs=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    name=f"public-subnet-{identifier}",
                    subnet_type=ec2.SubnetType.PUBLIC,
                    cidr_mask=28,
                )
            ],
        )

        self.cluster = ecs.Cluster(
            self,
            f"cluster-{identifier}",
            vpc=vpc,
            cluster_name=f"remote-cluster-{identifier}",
        )

        task_definition = ecs.FargateTaskDefinition(
            self,
            f"fargate-task-definition-{identifier}",
            cpu=int(os.environ.get("INSTANCE_CPU", 256)),
            memory_limit_mib=int(os.environ.get("INSTANCE_MEMORY", 512)),
        )

        log_driver = ecs.AwsLogDriver(
            stream_prefix=f"remote-workstation/{identifier}",
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        task_definition.add_container(
            f"container-definition-{identifier}",
            image=self.get_docker_image(identifier),
            logging=log_driver,
            environment={"SSH_PUBLIC_KEY": self.get_ssh_public_key()},
        )

        fargate_service = ecs.FargateService(
            self,
            f"fargate-service-{identifier}",
            assign_public_ip=True,
            cluster=self.cluster,
            desired_count=1,
            task_definition=task_definition,
        )

        for security_group in fargate_service.connections.security_groups:
            security_group.add_ingress_rule(
                peer=ec2.Peer.ipv4(f"{public_ip}/32"),
                connection=ec2.Port.tcp(22),
                description=f"SSH Access from {identifier}s Public IP",
            )
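        # get_docker_image() and get_ssh_public_key() are helper methods of
        # this stack; their definitions are not part of this excerpt.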
Example #15
    def __init__(self, scope: core.Stack, id: str, base_module, stream_module, **kwargs):
        super().__init__(scope, id, **kwargs)
        self.base_module = base_module
        self.stream_module = stream_module

        # Added the keys manually; this needs to be imported. It should likely be an argument passed to the stack, i.e. self.secrets_arn = passed-in-to-constructor
        self.twitter_secrets = aws_secretsmanager.Secret.from_secret_arn(
            self, "TwitterSecretARN",
            secret_arn=self.node.try_get_context("TWITTER_SECRET_ARN")
        )

        self.cluster = aws_ecs.Cluster(
            self, "ECSCluster",
            vpc=self.base_module.vpc,
        )

        # Queue to push the last updated id
        self.twitter_id_queue = aws_sqs.Queue(
            self, "TwitterWorkerQueue",
            queue_name="{}.fifo".format(self.stack_name),
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(90)
        )

        # SSM parameter to indicate if initial run has occurred
        self.initial_run_parameter = aws_ssm.StringParameter(
            self, "StringParameterInitialRun",
            parameter_name=f"/{self.stack_name}-NOT-first-run",
            string_value='False',
            description="Parameter for twitter stream feed to set to true after first run has occurred and an object has been put in queue"
        )

        self.task_definition = aws_ecs.FargateTaskDefinition(
            self, "TwitterWorkerTD",
            cpu=256,
            memory_limit_mib=512
        )

        self.task_definition.add_container(
            "ContainerImage",
            image=aws_ecs.ContainerImage.from_docker_image_asset(
                aws_ecr_assets.DockerImageAsset(
                    self, "DockerImage",
                    directory='./',
                    exclude=["cdk.out"]
                )
            ),
            logging=aws_ecs.AwsLogDriver(stream_prefix=self.stack_name, log_retention=aws_logs.RetentionDays.THREE_DAYS),
            environment={
                "FIREHOSE_NAME": self.stream_module.firehose.delivery_stream_name,
                "SQS_QUEUE_NAME": self.twitter_id_queue.queue_name,
                "SSM_PARAM_INITIAL_RUN": self.initial_run_parameter.parameter_name,
                "TWITTER_KEYWORD": os.getenv("TWITTER_KEYWORD") or 'maga',
                "SINCE_DATE": '2019-03-01',
                "WORLD_ID": '23424977'
            },
            secrets={
                # env var name under which the secret is exposed (illustrative)
                "TWITTER_SECRETS": aws_ecs.Secret.from_secrets_manager(
                    secret=self.twitter_secrets
                )
            }
        )

        # IAM Permissions for fargate task
        # Adding firehose put policy
        self.firehose_iam_policy_statement = aws_iam.PolicyStatement()
        self.firehose_iam_policy_statement.add_actions('firehose:Put*')
        self.firehose_iam_policy_statement.add_resources(self.stream_module.firehose.attr_arn)

        # Permission to talk to comprehend for sentiment analysis
        self.comprehend_iam_policy_statement = aws_iam.PolicyStatement()
        self.comprehend_iam_policy_statement.add_actions('comprehend:*')
        self.comprehend_iam_policy_statement.add_all_resources()

        self.task_iam_policy = aws_iam.Policy(
            self, "IAMPolicyTwitterStreamFargate",
            policy_name="TwitterStreamingFargate-{}".format(self.stack_name),
            statements=[self.firehose_iam_policy_statement,
                        self.comprehend_iam_policy_statement],
        )

        self.twitter_secrets.grant_read(self.task_definition.task_role)
        self.task_iam_policy.attach_to_role(self.task_definition.task_role)
        self.twitter_id_queue.grant_send_messages(self.task_definition.task_role)
        self.twitter_id_queue.grant_consume_messages(self.task_definition.task_role)
        self.initial_run_parameter.grant_read(self.task_definition.task_role)
        self.initial_run_parameter.grant_write(self.task_definition.task_role)

        self.fargate_service = aws_ecs.FargateService(
            self, "TwitterWorker",
            service_name=self.stack_name,
            task_definition=self.task_definition,
            cluster=self.cluster
        )
Example #16
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = props['vpc']
        endpoint_sg = props['endpoint_sg']
        cluster = props['cluster']

        # Create the task execution role
        task_execution_role_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AmazonECSTaskExecutionRolePolicy')
        task_execution_role = iam.Role(
            self,
            'TaskExecutionRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            managed_policies=[task_execution_role_policy])

        # Create the task role
        task_role = iam.Role(
            self,
            'TaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        # Specify the repository
        repository = ecr.Repository.from_repository_name(
            self, 'Frontend', 'frontend')

        # Create the task definition
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#task-definitions
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html

        task_definition = ecs.FargateTaskDefinition(
            self,
            'TaskDef',
            memory_limit_mib=512,
            cpu=256,
            execution_role=task_execution_role,
            task_role=task_role)
        container = task_definition.add_container(
            'Container',
            image=ecs.ContainerImage.from_ecr_repository(repository=repository,
                                                         tag='latest'),
            logging=ecs.AwsLogDriver(stream_prefix='/ecs/'),
            environment={
                'BACKEND_URL': 'http://backend.mycluster.local:5000/messages'
            })
        container.add_port_mappings(ecs.PortMapping(container_port=5000))

        # Security group for the ALB
        alb_sg = ec2.SecurityGroup(self, 'ALBSecurityGroup', vpc=vpc)

        # Create the ALB
        alb = elbv2.ApplicationLoadBalancer(self,
                                            'ALB',
                                            vpc=vpc,
                                            internet_facing=True,
                                            security_group=alb_sg)

        # # Allow traffic to port 80
        # alb_sg.add_ingress_rule(
        #     peer=ec2.Peer.any_ipv4(),
        #     connection=ec2.Port.tcp(80)
        # )
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80))

        # Security group for the Frontend service
        frontend_service_sg = ec2.SecurityGroup(self,
                                                'FrontendServiceSecurityGroup',
                                                vpc=vpc)

        # Create the service
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#service
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.html

        frontend_service = ecs.FargateService(
            self,
            'FrontendService',
            cluster=cluster,
            task_definition=task_definition,
            min_healthy_percent=50,
            max_healthy_percent=200,
            desired_count=2,
            security_group=frontend_service_sg,
            cloud_map_options=ecs.CloudMapOptions(name='frontend'))

        # Allow traffic from the ALB's security group
        frontend_service.connections.allow_from(alb, ec2.Port.all_traffic())
        # Allow traffic from this service's own security group
        frontend_service.connections.allow_internally(ec2.Port.all_traffic())
        # Allow access to the endpoint security group
        frontend_service.connections.allow_to(endpoint_sg,
                                              ec2.Port.all_traffic())

        # ApplicationListener
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationListener.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationListener

        listener = alb.add_listener('Listener', port=80)

        listener.add_targets('ECS',
                             port=5000,
                             protocol=elbv2.ApplicationProtocol.HTTP,
                             targets=[frontend_service],
                             health_check=elbv2.HealthCheck(
                                 path='/health',
                                 interval=core.Duration.seconds(10),
                                 healthy_threshold_count=2))

        core.CfnOutput(self,
                       'LoadBalancerDNS',
                       description='Load Balancer DNS Name',
                       value=alb.load_balancer_dns_name)

        self.output_props = props.copy()
        self.output_props['frontend_service'] = frontend_service
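        # output_props copies the incoming props and adds the created service,
        # so a downstream stack in the same app can consume frontend_service.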
Example #17
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 stack_name: str,
                 task_definition_cpu: int,
                 task_definition_memory_limit_mib: int,
                 docker_image_name: str,
                 container_port: int,
                 desired_container_count: int,
                 private_subnets: Sequence[aws_ec2.Subnet] = None,
                 public_subnets: Sequence[aws_ec2.Subnet] = None,
                 private_security_group: aws_ec2.SecurityGroup = None,
                 public_security_group: aws_ec2.SecurityGroup = None,
                 vpc: aws_ec2.Vpc = None,
                 fargate_cluster: aws_ecs.Cluster = None,
                 authorizer_lambda_arn: str = None,
                 authorizer_lambda_role_arn: str = None,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        # Role
        self.role = aws_iam.Role(
            self,
            'Role',
            assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AmazonECSTaskExecutionRolePolicy')
            ],
            inline_policies={
                id:
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            'kms:Encrypt',
                            'kms:Decrypt',
                            'kms:ReEncrypt*',
                            'kms:GenerateDataKey*',
                            'kms:DescribeKey',
                            'ec2:CreateNetworkInterface',
                            'ec2:DescribeNetworkInterfaces',
                            'ec2:DeleteNetworkInterface',
                            # Remaining actions from https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                            'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                            'elasticloadbalancing:DeregisterTargets',
                            'elasticloadbalancing:Describe*',
                            'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                            'elasticloadbalancing:RegisterTargets',
                            'ec2:Describe*',
                            'ec2:AuthorizeSecurityGroupIngress'
                        ],
                        resources=['*'])
                ])
            })
        self.role.assume_role_policy.add_statements(
            aws_iam.PolicyStatement(
                actions=['sts:AssumeRole'],
                principals=[
                    aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
                ]))

        # Set Defaults if parameters are None
        if vpc is None:
            vpc = aws_ec2.Vpc(self, 'Vpc')

        if private_subnets is None:
            private_subnets = vpc.private_subnets

        if public_subnets is None:
            public_subnets = vpc.public_subnets

        if public_security_group is None:
            public_security_group = aws_ec2.SecurityGroup(
                self, 'PublicSecurityGroup', vpc=vpc, allow_all_outbound=True)
            # Allow inbound HTTP traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=80))
            # Allow inbound HTTPS traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=443))

        if private_security_group is None:
            private_security_group = aws_ec2.SecurityGroup(
                self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)

            public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
                public_subnets)

            # Create an ingress rule for each of the NLB's subnet's CIDR ranges and add the rules to the ECS service's
            # security group.  This will allow requests from the NLB to go into the ECS service.  This allow inbound
            # traffic from public subnets.
            for cidr_block in public_subnet_cidr_blocks:
                private_security_group.add_ingress_rule(
                    peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                    connection=aws_ec2.Port.tcp(port=container_port))

        if fargate_cluster is None:
            fargate_cluster = aws_ecs.Cluster(
                self,
                'FargateCluster',
            )

        task_def = aws_ecs.FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=task_definition_cpu,
            memory_limit_mib=task_definition_memory_limit_mib,
            task_role=self.role,
            execution_role=self.role)

        container = aws_ecs.ContainerDefinition(
            self,
            'Container',
            image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
            task_definition=task_def,
            logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
        container.add_port_mappings(
            aws_ecs.PortMapping(container_port=container_port,
                                protocol=aws_ecs.Protocol.TCP))

        ecs_service = aws_ecs.FargateService(
            self,
            'FargateService',
            cluster=fargate_cluster,
            task_definition=task_def,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
            security_group=private_security_group,
            desired_count=desired_container_count)

        target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
            self,
            'TargetGroup',
            port=80,  # Health check occurs over HTTP
            health_check=aws_elasticloadbalancingv2.HealthCheck(
                protocol=aws_elasticloadbalancingv2.Protocol.TCP),
            targets=[ecs_service],
            vpc=vpc)

        nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
            self,
            'NetworkLoadBalancer',
            vpc=vpc,
            internet_facing=False,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
        )
        nlb.add_listener(
            id='Listener',
            port=80,  # HTTP listener
            default_target_groups=[target_group])
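        # API Gateway REST API VPC links can only target Network Load
        # Balancers, which is why an internal NLB (rather than an ALB)
        # fronts the Fargate service here.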

        # nlb.log_access_logs(  # todo:  add this later when you have time to research the correct bucket policy.
        #     bucket=aws_s3.Bucket(
        #         self, 'LoadBalancerLogBucket',
        #         bucket_name='load-balancer-logs',
        #         public_read_access=False,
        #         block_public_access=aws_s3.BlockPublicAccess(
        #             block_public_policy=True,
        #             restrict_public_buckets=True
        #         )
        #     )
        # )

        # Dependencies
        ecs_service.node.add_dependency(nlb)

        # API Gateway
        rest_api = aws_apigateway.RestApi(self, stack_name)
        resource = rest_api.root.add_resource(
            path_part='{proxy+}',
            default_method_options=aws_apigateway.MethodOptions(
                request_parameters={'method.request.path.proxy': True}))

        token_authorizer = None
        if authorizer_lambda_arn and authorizer_lambda_role_arn:
            token_authorizer = aws_apigateway.TokenAuthorizer(  #todo: make this a parameter?
                self,
                'JwtTokenAuthorizer',
                results_cache_ttl=core.Duration.minutes(5),
                identity_source='method.request.header.Authorization',
                assume_role=aws_iam.Role.from_role_arn(
                    self,
                    'AuthorizerLambdaInvocationRole',
                    role_arn=authorizer_lambda_role_arn),
                handler=aws_lambda.Function.from_function_arn(
                    self,
                    'AuthorizerLambda',
                    function_arn=authorizer_lambda_arn))

        resource.add_method(
            http_method='ANY',
            authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
            authorizer=token_authorizer,
            integration=aws_apigateway.HttpIntegration(
                url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
                http_method='ANY',
                proxy=True,
                options=aws_apigateway.IntegrationOptions(
                    request_parameters={
                        'integration.request.path.proxy':
                        'method.request.path.proxy'
                    },
                    connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                    vpc_link=aws_apigateway.VpcLink(
                        self,
                        'VpcLink',
                        description=
                        f'API Gateway VPC Link to internal NLB for {stack_name}',
                        vpc_link_name=stack_name,
                        targets=[nlb]))))
Пример #18
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get config value for alert email
        email = self.node.try_get_context("email")
        if email == 'changeme@localhost':
            exit(
                'ERROR: Change the email in cdk.json or pass it with -c email=<your-email>'
            )

        # Create SNS for alarms to be sent to
        alarm_topic = sns.Topic(self,
                                "backup_alarm",
                                display_name="backup_alarm")

        # Subscribe my email so the alarms go to me
        alarm_topic.add_subscription(subscriptions.EmailSubscription(email))

        # Create VPC to run everything in. We make this public just because we don't
        # want to spend $30/mo on a NAT gateway.
        vpc = ec2.Vpc(
            self,
            "VPC",
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC)
            ],
        )

        ecs_sg = ec2.SecurityGroup(self, "ecs_sg", vpc=vpc)
        efs_sg = ec2.SecurityGroup(self, "efs_sg", vpc=vpc)
        efs_sg.add_ingress_rule(
            peer=ecs_sg,
            connection=ec2.Port.tcp(2049),
            description="Allow backup runner access",
        )
        # Also open NFS to the wider VPC CIDR range
        efs_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("10.0.0.0/8"),
            connection=ec2.Port.tcp(2049),
            description="Allow NFS access from within the VPC",
        )

        # Define the EFS
        fileSystem = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,
            lifecycle_policy=efs.LifecyclePolicy.AFTER_7_DAYS,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            security_group=efs_sg,
        )

        # Define the ECS task
        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
        taskDefinition = ecs.FargateTaskDefinition(
            self,
            "taskDefinition",
            volumes=[
                ecs.Volume(
                    name="efsvolume",
                    efs_volume_configuration=ecs.EfsVolumeConfiguration(
                        file_system_id=fileSystem.file_system_id,
                        root_directory="/",
                        transit_encryption="ENABLED",
                    ),
                )
            ],
            memory_limit_mib=8192,
            cpu=2048,
        )

        log_driver = ecs.AwsLogDriver(
            stream_prefix="backup_runner",
            log_retention=logs.RetentionDays.TWO_WEEKS,
        )

        taskDefinition.add_container(
            "backup-runner",
            image=ecs.ContainerImage.from_asset("./resources/backup_runner"),
            memory_limit_mib=8192,
            cpu=2048,
            logging=log_driver,
        )

        # add_container() doesn't let us specify the EFS mount point, so we add
        # it here on the container that was just created.
        taskDefinition.default_container.add_mount_points(
            ecs.MountPoint(container_path="/mnt/efs",
                           read_only=False,
                           source_volume="efsvolume"))

        # Create rule to trigger this be run every 24 hours
        events.Rule(
            self,
            "scheduled_run",
            rule_name="backup_runner",
            # Run every night at midnight UTC (EventBridge cron runs in UTC)
            schedule=events.Schedule.expression("cron(0 0 * * ? *)"),
            description="Starts the backup runner task every night",
            targets=[
                targets.EcsTask(
                    cluster=cluster,
                    task_definition=taskDefinition,
                    subnet_selection=ec2.SubnetSelection(
                        subnet_type=ec2.SubnetType.PUBLIC),
                    platform_version=ecs.FargatePlatformVersion.
                    VERSION1_4,  # Required to use EFS
                    # Because "Latest" does not yet support EFS
                    security_groups=[ecs_sg],
                )
            ],
        )
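        # The same schedule can be written with the typed builder instead of a
        # raw cron expression (a sketch; both fire at midnight UTC):
        # schedule=events.Schedule.cron(minute="0", hour="0")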

        # Create notification topic for backups
        backup_topic = sns.Topic(self,
                                 "backup_topic",
                                 display_name="Backup status")

        # Create AWS Backup
        vault = backup.BackupVault(
            self,
            "Vault",
            access_policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=[
                        "backup:DeleteBackupVault",
                        "backup:DeleteRecoveryPoint",
                        "backup:UpdateRecoveryPointLifecycle",
                        # "backup:PutBackupVaultAccessPolicy", # This results in "Failed putting policy for Backup vault backuprunnerVaultXXX as it will lock down from further policy changes"
                        "backup:DeleteBackupVaultAccessPolicy",
                        "backup:DeleteBackupVaultNotifications",
                        # "backup:PutBackupVaultNotifications", # This causes oher part of this app to fail.
                    ],
                    resources=["*"],
                    principals=[iam.AnyPrincipal()],
                )
            ]),
            notification_topic=alarm_topic,
            notification_events=[
                # Monitor for some failures or access to the backups
                backup.BackupVaultEvents.BACKUP_JOB_EXPIRED,
                backup.BackupVaultEvents.BACKUP_JOB_FAILED,
                backup.BackupVaultEvents.COPY_JOB_FAILED,
                backup.BackupVaultEvents.COPY_JOB_STARTED,
                backup.BackupVaultEvents.RESTORE_JOB_COMPLETED,
                backup.BackupVaultEvents.RESTORE_JOB_FAILED,
                backup.BackupVaultEvents.RESTORE_JOB_STARTED,
                backup.BackupVaultEvents.RESTORE_JOB_SUCCESSFUL,
            ],
        )

        plan = backup.BackupPlan.daily35_day_retention(self, "backup")
        plan.add_selection(
            "Selection",
            resources=[backup.BackupResource.from_efs_file_system(fileSystem)],
        )
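        # If the canned daily/35-day plan doesn't fit, a plan can be assembled
        # from individual rules (a sketch; rule choices are illustrative):
        # plan = backup.BackupPlan(self, "CustomPlan")
        # plan.add_rule(backup.BackupPlanRule.daily())
        # plan.add_rule(backup.BackupPlanRule.monthly1_year())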

        #
        # Create metric filter for errors in the CloudWatch Logs from the ECS
        #
        METRIC_NAME = "log_errors"
        METRIC_NAMESPACE = "backup_runner"

        metric = cloudwatch.Metric(namespace=METRIC_NAMESPACE,
                                   metric_name=METRIC_NAME)

        error_metric = logs.MetricFilter(
            self,
            "MetricFilterId",
            metric_name=METRIC_NAME,
            metric_namespace=METRIC_NAMESPACE,
            log_group=log_driver.log_group,
            filter_pattern=logs.FilterPattern.any_term("ERROR"),
            metric_value="1",
        )
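        # The Alarm below is wired to a hand-built Metric that matches the
        # filter's namespace and name; on newer CDK releases,
        # error_metric.metric() returns the equivalent Metric directly.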

        error_alarm = cloudwatch.Alarm(
            self,
            "AlarmId",
            metric=metric,
            evaluation_periods=1,
            actions_enabled=True,
            alarm_name="backuper_runner_alarm",
            alarm_description="Errors in backup runner",
            comparison_operator=cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch.TreatMissingData.NOT_BREACHING,
            period=core.Duration.hours(1),
            threshold=1,
            statistic="sum",
        )

        # Connect the alarm to the SNS
        error_alarm.add_alarm_action(cloudwatch_actions.SnsAction(alarm_topic))

        # add_alarm_action doesn't grant CloudWatch permission to publish, so grant it on the topic's resource policy.
        alarm_topic.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["sns:Publish"],
                resources=[alarm_topic.topic_arn],
                principals=[iam.ServicePrincipal("cloudwatch.amazonaws.com")],
            ))
Пример #19
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

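        # http_port, task_def_cpu and task_def_memory_mb are assumed to be
        # module-level constants defined elsewhere in this file, e.g.:
        # http_port = 80
        # task_def_cpu = "512"
        # task_def_memory_mb = "1024"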
        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, "EC2Cluster",
            vpc=vpc
        )

        security_group = ec2.SecurityGroup(
            self, "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
        )

        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_tcp(),
            description="Allow all traffic"
        )

        app_target_group = elbv2.ApplicationTargetGroup(
            self, "AppTargetGroup",
            port=http_port,
            vpc=vpc,
            target_type=elbv2.TargetType.IP,
        )

        elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group,
        )

        app_listener = elbv2.ApplicationListener(
            self, "AppListener",
            load_balancer=elastic_loadbalancer,
            port=http_port,
            default_target_groups=[app_target_group],
        )

        task_definition = ecs.TaskDefinition(
            self, "TaskDefinition",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=task_def_cpu,
            memory_mib=task_def_memory_mb,
        )

        container_definition = ecs.ContainerDefinition(
            self, "ContainerDefinition",
            image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
            task_definition=task_definition,
            logging=ecs.AwsLogDriver(
                stream_prefix="DemoContainerLogs",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )

        container_definition.add_port_mappings(
            ecs.PortMapping(
                container_port=http_port,
            )
        )

        fargate_service = ecs.FargateService(
            self, "FargateService",
            task_definition=task_definition,
            cluster=cluster,
            security_group=security_group,
        )

        fargate_service.attach_to_application_target_group(
            target_group=app_target_group,
        )

        core.CfnOutput(
            self, "LoadBalancerDNS",
            value=elastic_loadbalancer.load_balancer_dns_name,
        )
Пример #20
0
    def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnets = []
        subnets.append(
            aws_ec2.SubnetConfiguration(name="DeviceSubnet",
                                        subnet_type=aws_ec2.SubnetType.PUBLIC,
                                        cidr_mask=24))

        vpc = aws_ec2.Vpc(self,
                          "DeviceVpc",
                          max_azs=2,
                          subnet_configuration=subnets)

        # Select the public subnets
        selection = vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PUBLIC)

        sg = aws_ec2.SecurityGroup(
            self,
            id="FarGateSecGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description="Allow access to virtual device",
            security_group_name="Virtual Device Security Group")

        sg.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                            connection=aws_ec2.Port.tcp(80))

        rnd_suffix = create_random_name(4).lower()
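        # create_random_name is assumed to be a small helper defined elsewhere
        # in this project; it isn't part of the CDK.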

        # pipeline requires versioned bucket
        bucket = aws_s3.Bucket(self,
                               "SourceBucket",
                               bucket_name="{}-{}-{}".format(
                                   props['namespace'].lower(),
                                   core.Aws.ACCOUNT_ID, core.Aws.REGION),
                               versioned=True,
                               removal_policy=core.RemovalPolicy.DESTROY)

        # ssm parameter to get bucket name later
        bucket_param = aws_ssm.StringParameter(
            self,
            "ParameterBucketName",
            parameter_name=f"{props['namespace']}-bucket",
            string_value=bucket.bucket_name,
            description='IoT playground pipeline bucket')

        # ecr repo to push docker container into
        ecr = aws_ecr.Repository(self,
                                 "ECR",
                                 repository_name=f"{props['namespace']}",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        # codebuild project meant to run in pipeline
        cb_docker_build = aws_codebuild.PipelineProject(
            self,
            "DockerBuild",
            project_name=f"{props['namespace']}-Docker-Build",
            build_spec=aws_codebuild.BuildSpec.from_source_filename(
                filename='docker/docker_build_buildspec.yml'),
            environment=aws_codebuild.BuildEnvironment(privileged=True),

            # pass the ecr repo uri into the codebuild project so codebuild knows where to push
            environment_variables={
                'ecr':
                aws_codebuild.BuildEnvironmentVariable(
                    value=ecr.repository_uri),
                'tag':
                aws_codebuild.BuildEnvironmentVariable(value='virtual_device')
            },
            description='Pipeline for CodeBuild',
            timeout=core.Duration.minutes(10),
        )
        # codebuild iam permissions to read write s3
        bucket.grant_read_write(cb_docker_build)

        # codebuild permissions to interact with ecr
        ecr.grant_pull_push(cb_docker_build)

        ecs_cluster = aws_ecs.Cluster(self, 'DeviceCluster', vpc=vpc)

        fargate_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'DeviceTaskDef',
            cpu=512,
            memory_limit_mib=1024
            #network_mode=aws_ecs.NetworkMode.AWS_VPC,
        )

        # fargate_task_def.add_to_task_role_policy(aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=[
        #         "s3:PutObject"],
        #     resources=["*"]
        # ))

        fargate_task_def.add_to_execution_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=[
                                        "ecr:GetAuthorizationToken",
                                        "ecr:BatchCheckLayerAvailability",
                                        "ecr:GetDownloadUrlForLayer",
                                        "ecr:BatchGetImage",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents"
                                    ],
                                    resources=["*"]))

        container_image = aws_ecs.EcrImage(repository=ecr,
                                           tag="virtual_device")

        logging = aws_ecs.AwsLogDriver(stream_prefix="virtual_device")

        container = fargate_task_def.add_container("DeviceContainer",
                                                   image=container_image,
                                                   cpu=512,
                                                   memory_limit_mib=1024,
                                                   logging=logging,
                                                   essential=True)

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # The code that defines your stack goes here
        table = aws_dynamodb.Table(self,
                                   "DeviceFactoryCatalog",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=3,
                                   write_capacity=3)

        function = aws_lambda.Function(
            self,
            "DeviceFactoryLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../lambdas/device_factory_lambda"),
            timeout=core.Duration.minutes(1))

        function.add_environment("BUCKET_NAME", bucket.bucket_name)
        function.add_environment("ECS_CLUSTER", ecs_cluster.cluster_name)
        function.add_environment("ECS_TASK_DEF",
                                 fargate_task_def.task_definition_arn)
        function.add_environment("DDB_TABLE_DEVICE_CATALOG", table.table_name)
        function.add_environment("SUBNET_1", selection.subnets[0].subnet_id)
        function.add_environment("SUBNET_2", selection.subnets[1].subnet_id)
        function.add_environment("SEC_GROUP", sg.security_group_id)

        table.grant_read_write_data(function)

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iot:*"],
                                    resources=["*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:PutObject", "s3:GetObject"],
                resources=["{}/*".format(bucket.bucket_arn)]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iam:PassRole"],
                                    resources=["arn:aws:iam::*:role/*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["ecs:RunTask", "ecs:StopTask"],
                                    resources=["*"]))

        api_gtw = aws_apigateway.LambdaRestApi(
            self,
            id="DeviceFactoryApi",
            rest_api_name="DeviceFactoryApi",
            handler=function)

        # ssm parameter to get api endpoint later
        endpoint_param = aws_ssm.StringParameter(
            self,
            "ParameterDeviceFactoryEndpoint",
            parameter_name=f"{props['namespace']}-devicefactoryendpoint",
            string_value=api_gtw.url,
            description='IoT playground device factory endpoint')

        # ssm parameter to get ecr uri later
        ecr_uri_param = aws_ssm.StringParameter(
            self,
            "ParameterEcrUri",
            parameter_name=f"{props['namespace']}-ecruri",
            string_value=ecr.repository_uri,
            description='IoT playground ECR URI')

        # ssm parameter to get cluster name later
        cluster_name_param = aws_ssm.StringParameter(
            self,
            "ParameterClusterName",
            parameter_name=f"{props['namespace']}-clustername",
            string_value=ecs_cluster.cluster_name,
            description='IoT playground Cluster Name')

        core.CfnOutput(
            self,
            "EcrUri",
            description="ECR URI",
            value=ecr.repository_uri,
        )

        core.CfnOutput(self,
                       "S3Bucket",
                       description="S3 Bucket",
                       value=bucket.bucket_name)

        core.CfnOutput(self,
                       "DeviceFactoryEndpoint",
                       description="Device Factory Endpoint",
                       value=api_gtw.url)

        self.output_props = props.copy()
        self.output_props['bucket'] = bucket
        self.output_props['cb_docker_build'] = cb_docker_build
Пример #21
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: add resources for Part 2, 3 of blog post.

        # Kinesis Data Streams
        kds = kinesis.Stream(self,
                             "KinesisTweets",
                             stream_name="kinesis-tweets",
                             shard_count=5,
                             retention_period=Duration.hours(48))

        # Fargate Task Role
        task_role = iam.Role(
            self,
            'task_role',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        # Policy to allow task to put records into Kinesis
        task_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'kinesis:PutRecord', 'kinesis:PutRecords',
                                    'kinesis:DescribeStream'
                                ],
                                resources=[kds.stream_arn]))
        # Policy to get secret from SecretsManager
        task_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'secretsmanager:GetResourcePolicy',
                                    'secretsmanager:GetSecretValue',
                                    'secretsmanager:DescribeSecret',
                                    'secretsmanager:ListSecretVersionIds'
                                ],
                                resources=['*']))

        # VPC
        vpc = ec2.Vpc(
            self,
            'FargateVPC',
            max_azs=2  # Default is all AZs in the region
        )

        # ECS Cluster
        cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc)

        # Fargate Task Definition
        task_definition = ecs.FargateTaskDefinition(self,
                                                    'ServiceTaskDefinition',
                                                    cpu=256,
                                                    memory_limit_mib=512,
                                                    task_role=task_role)

        # Fargate log driver
        fargate_logger = ecs.AwsLogDriver(stream_prefix='fargate_twitter_logs')

        # Container
        task_definition.add_container(
            'ServiceContainer',
            image=ecs.ContainerImage.from_asset('./ECSContainerFiles'),
            environment={
                'KINESIS_STREAM_NAME': kds.stream_name,
                'REGION_NAME': self.region,
                'KEYWORD': 'trump',
                'SECRETS_NAME': 'TwitterAPISecrets'
            },
            logging=fargate_logger)

        # Fargate Service
        service = ecs.FargateService(self,
                                     'ServiceFargateService',
                                     task_definition=task_definition,
                                     assign_public_ip=True,
                                     cluster=cluster)
Пример #22
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, repository: ecr.Repository, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        namespace = servicediscovery.PrivateDnsNamespace(
            scope=self,
            id="PRIVATE-DNS",
            vpc=vpc,
            name="private",
            description="a private dns"
        )

        sg = ec2.SecurityGroup(
            scope=self,
            id="SG",
            vpc=vpc,
            allow_all_outbound=True,
            description="open 9200 and 9300 ports",
            security_group_name="es-group"
        )
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9200),
        )
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9300),
        )
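        # 9200 is the Elasticsearch REST port, 9300 the inter-node transport port.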

        #####################################################
        elastic_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="ES-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        elastic = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=30),
            task_definition=elastic_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                # "discovery.zen.ping.unicast.hosts": "elasticsearch",
                "node.name": constants.ES_CONTAINER_NAME,
                "node.master": "true",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.AwsLogDriver(
                stream_prefix="ES",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65535, soft_limit=65535))
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))

        elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
        elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

        elastic.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))
        # elastic.add_volumes_from(ecs.VolumeFrom(
        #     source_container="esdata",
        #     read_only=False,
        #     ))

        es_service = ecs.Ec2Service(
            scope=self,
            id="ES-SERVICE",
            cluster=cluster,
            task_definition=elastic_task_def,
            desired_count=1,
            service_name="ES",
            security_group=sg,
        )

        es_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="ES-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        es_listener = es_lb.add_listener(
            id="ES-LISTENER",
            port=80,
        )
        es_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="ES-GRP",
                container_name=elastic.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=es_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))

        service = es_service.enable_cloud_map(
            cloud_map_namespace=namespace,
            dns_record_type=servicediscovery.DnsRecordType.A,
            # dns_ttl=core.Duration.seconds(amount=30),
            failure_threshold=1,
            name="elastic",
        )

        core.CfnOutput(
            scope=self,
            id="DNS-ES",
            value=es_lb.load_balancer_dns_name,
        )

        #####################################################

        node_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="NODE-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        node = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_NODE_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=40),
            task_definition=node_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                "discovery.zen.ping.unicast.hosts": "elastic.private",
                "node.name": constants.ES_NODE_CONTAINER_NAME,
                "node.master": "false",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="NODE",
                log_retention=logs.RetentionDays.ONE_DAY,
            ))

        node.add_port_mappings(ecs.PortMapping(container_port=9200))
        node.add_port_mappings(ecs.PortMapping(container_port=9300))

        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65536, soft_limit=65536))
        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))
        node.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))

        node_service = ecs.Ec2Service(
            scope=self,
            id="ES-NODE-SERVICE",
            cluster=cluster,
            task_definition=node_task_def,
            desired_count=1,
            service_name="NODE",
            security_group=sg,
        )

        node_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="NODE-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        node_listener = node_lb.add_listener(
            id="NODE-LISTENER",
            port=80,
        )
        node_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="NODE-GRP",
                container_name=node.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=node_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))
        core.CfnOutput(
            scope=self,
            id="DNS-NODE",
            value=node_lb.load_balancer_dns_name,
        )
Пример #23
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        elastic_cluster_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="ES-TASK-DEF",
            network_mode=ecs.NetworkMode.BRIDGE,
        )

        elastic = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=30),
            task_definition=elastic_cluster_task_def,
            memory_limit_mib=4024,
            essential=True,
            image=ecs.ContainerImage.from_registry(
                name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                # "discovery.zen.ping.unicast.hosts": "elasticsearch",
                "node.name": constants.ES_CONTAINER_NAME,
                "node.master": "true",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
            },
            logging=ecs.AwsLogDriver(
                stream_prefix="ES",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65535, soft_limit=65535))
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))

        elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
        elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

        #####################################################
        node = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_NODE_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=40),
            task_definition=elastic_cluster_task_def,
            memory_limit_mib=4024,
            essential=True,
            image=ecs.ContainerImage.from_registry(
                name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                "discovery.zen.ping.unicast.hosts": constants.ES_CONTAINER_NAME,
                "node.name": constants.ES_NODE_CONTAINER_NAME,
                "node.master": "false",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="NODE",
                log_retention=logs.RetentionDays.ONE_DAY,
            ))

        node.add_port_mappings(ecs.PortMapping(container_port=9200))
        node.add_port_mappings(ecs.PortMapping(container_port=9300))

        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65536, soft_limit=65536))
        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))
        node.add_link(container=elastic, alias=constants.ES_CONTAINER_NAME)
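        # Container links only work with BRIDGE networking (as used by this
        # task definition); in AWS_VPC mode, containers in the same task share
        # localhost instead.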

        #####################################################

        ecs_service = ecs.Ec2Service(
            scope=self,
            id="ES-SERVICE",
            cluster=cluster,
            task_definition=elastic_cluster_task_def,
            desired_count=1,
            service_name=constants.ECS_ES_SERVICE,
        )

        lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="ELB",
            vpc=vpc,
            internet_facing=True,
        )
        listener = lb.add_listener(
            id="LISTENER",
            port=80,
        )
        ecs_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="TARGET-GRP",
                container_name=elastic.container_name,
                # container_port=9200,
                listener=ecs.ListenerConfig.application_listener(
                    listener=listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))

        core.CfnOutput(
            scope=self,
            id="DNS-NAME",
            value=lb.load_balancer_dns_name,
        )
Пример #24
0
    def __init__(self,
                 scope: cdk.Construct,
                 id: str,
                 name: str,
                 vpc_name: str,
                 security_group_name: str,
                 secrets_path: str = "/ibc/paper/",
                 trading_mode: str = "paper",
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: Create Log Group

        # Create a cluster
        vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_name=vpc_name)

        privateSubnets = vpc.private_subnets

        cluster = ecs.Cluster(self, "cluster", vpc=vpc)
        # TODO: check for namespace before adding below.  This is failing on stack updates.
        cluster.add_default_cloud_map_namespace(name="private")

        task = ecs.FargateTaskDefinition(self,
                                         "task",
                                         cpu="512",
                                         memory_mi_b="1024")

        # Add SSM Permissions to IAM Role
        SSM_ACTIONS = ["ssm:GetParametersByPath", "kms:Decrypt"]
        SSM_RESOURCES = [
            "arn:aws:kms:*:*:alias/aws/ssm",
            "arn:aws:ssm:*:*:parameter{}*".format(secrets_path),
        ]
        ssmPolicy = iam.PolicyStatement(iam.PolicyStatementEffect.Allow)
        for action in SSM_ACTIONS:
            ssmPolicy.add_action(action)
        for resource in SSM_RESOURCES:
            ssmPolicy.add_resource(resource)
        task.add_to_task_role_policy(ssmPolicy)
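        # On CDK v1 the same statement is built in a single call (a sketch):
        # ssmPolicy = iam.PolicyStatement(effect=iam.Effect.ALLOW,
        #                                 actions=SSM_ACTIONS,
        #                                 resources=SSM_RESOURCES)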

        ibcRepo = ecr.Repository.from_repository_name(self, "container_repo",
                                                      "ibc")

        ibcImage = ecs.ContainerImage.from_ecr_repository(ibcRepo, "latest")

        # TODO: Add to Existing Hierarchal Logger, add log_group argument with ref to it
        ibcLogger = ecs.AwsLogDriver(self, "logger", stream_prefix=name)

        connectionLossMetric = logs.MetricFilter(
            self,
            "connectionLossMetric",
            filter_pattern=logs.FilterPattern.literal("ERROR ?110 ?130"),
            log_group=ibcLogger.log_group,
            metric_name="ib_connection_loss",
            metric_namespace=name,
        )

        newContainerMetric = logs.MetricFilter(
            self,
            "newContainerMetric",
            filter_pattern=logs.FilterPattern.literal(
                "Starting virtual X frame buffer"),
            log_group=ibcLogger.log_group,
            metric_name="new_container",
            metric_namespace=name,
        )

        kinesisFirehoseBucketActions = [
            "s3:AbortMultipartUpload",
            "s3:GetBucketLocation",
            "s3:GetObject",
            "s3:ListBucket",
            "s3:ListBucketMultipartUploads",
        ]

        kinesisFirehoseBucket = s3.Bucket(self, "firehoseBucket")

        kinesisFirehoseBucketPolicy = iam.PolicyStatement(
            iam.PolicyStatementEffect.Allow)
        for action in kinesisFirehoseBucketActions:
            kinesisFirehoseBucketPolicy.add_action(action)
        for resource in [
                kinesisFirehoseBucket.bucket_arn,
                kinesisFirehoseBucket.bucket_arn + "/*",
        ]:
            kinesisFirehoseBucketPolicy.add_resource(resource)

        kinesisFirehoseBucketRole = iam.Role(
            self,
            "kinesisFirehoseBucketRole",
            assumed_by=iam.ServicePrincipal("firehose.amazonaws.com"),
            path="/service/" + name + "/",
        )
        kinesisFirehoseBucketRole.add_to_policy(kinesisFirehoseBucketPolicy)

        kinesisFirehose = firehose.CfnDeliveryStream(
            self,
            "firehose",
            delivery_stream_name=name,
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                "bucketArn": kinesisFirehoseBucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 10 * 60,
                    "sizeInMBs": 16
                },
                "compressionFormat": "GZIP",
                "roleArn": kinesisFirehoseBucketRole.role_arn,
            },
        )

        # Add Firehose Permissions to Task IAM Role
        FIREHOSE_ACTIONS = ["firehose:PutRecord", "firehose:PutRecordBatch"]
        firehosePolicy = iam.PolicyStatement(iam.PolicyStatementEffect.Allow)
        for action in FIREHOSE_ACTIONS:
            firehosePolicy.add_action(action)
        firehosePolicy.add_resource(kinesisFirehose.delivery_stream_arn)
        task.add_to_task_role_policy(firehosePolicy)

        environment = {
            "SECRETS_PATH": secrets_path,
            "TWS_LIVE_PAPER": trading_mode,
            "FIREHOSE_STREAM_NAME": kinesisFirehose.delivery_stream_name,
        }

        ibcContainer = ecs.ContainerDefinition(
            self,
            "container",
            task_definition=task,
            image=ibcImage,
            environment=environment,
            logging=ibcLogger,
            essential=True,
        )

        securityGroup = ec2.SecurityGroup.from_security_group_id(
            self, "task_security_group", security_group_id=security_group_name)

        ibcService = ecs.FargateService(
            self,
            "fargate_service",
            cluster=cluster,
            task_definition=task,
            assign_public_ip=False,
            desired_count=1,
            security_group=securityGroup,
            service_discovery_options=ecs.ServiceDiscoveryOptions(name=name),
            service_name=name,
            vpc_subnets=privateSubnets,
        )
Пример #25
0
    def __init__(self, scope: core.Construct, id: str, props: props_type,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        ns = SimpleNamespace(**props)

        bucket_name = os.environ.get("BUCKET_NAME")
        fernet_key_secret = sm.Secret.from_secret_arn(
            self, "fernetSecret", os.environ.get("FERNET_SECRET_ARN"))

        webserver_ns = sd.PrivateDnsNamespace(
            self,
            "webserver-dns-namespace",
            vpc=ns.vpc.instance,
            name="airflow",
            description="Private DNS for Airflow webserver",
        )

        # Webserver
        webserver_task = ecs.FargateTaskDefinition(
            self,
            "webserver-cdk",
            family="webserver-cdk",
            cpu=512,
            memory_limit_mib=1024,
            task_role=ns.airflow_cluster.airflow_task_role,
            execution_role=ns.airflow_cluster.task_execution_role,
        )
        webserver_container = webserver_task.add_container(
            "webserver-cdk-container",
            image=ecs.ContainerImage.from_ecr_repository(
                ns.ecr.airflow_webserver_repo,
                os.environ.get("IMAGE_TAG", "latest"),
            ),
            logging=ecs.AwsLogDriver(
                stream_prefix="ecs",
                log_group=ns.airflow_cluster.webserver_log_group),
            environment={
                "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
                "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
                "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
                "AIRFLOW_EXECUTOR": "CeleryExecutor",
                "AIRFLOW_LOAD_EXAMPLES": "no",
                "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
                "BUCKET_NAME": bucket_name,
            },
            secrets={
                "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
                "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
                "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
            },
        )

        ws_port_mapping = ecs.PortMapping(container_port=8080,
                                          host_port=8080,
                                          protocol=ecs.Protocol.TCP)
        webserver_container.add_port_mappings(ws_port_mapping)

        # Webserver service
        webserver_service = ecs.FargateService(
            self,
            "webserverService",
            service_name="webserver_cdk",
            cluster=ns.airflow_cluster.instance,
            task_definition=webserver_task,
            desired_count=1,
            security_group=ns.vpc.airflow_sg,
            assign_public_ip=False,
            cloud_map_options=ecs.CloudMapOptions(
                cloud_map_namespace=webserver_ns,
                name="webserver",
                dns_record_type=sd.DnsRecordType.A,
                dns_ttl=core.Duration.seconds(30),
            ),
        )
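        # Cloud Map registers this service as webserver.airflow (service name
        # "webserver" in the "airflow" namespace); that is the hostname the
        # scheduler and worker pass as AIRFLOW_WEBSERVER_HOST below.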

        # Scheduler
        scheduler_task = ecs.FargateTaskDefinition(
            self,
            "scheduler-cdk",
            family="scheduler-cdk",
            cpu=512,
            memory_limit_mib=2048,
            task_role=ns.airflow_cluster.airflow_task_role,
            execution_role=ns.airflow_cluster.task_execution_role,
        )

        scheduler_task.add_container(
            "scheduler-cdk-container",
            image=ecs.ContainerImage.from_ecr_repository(
                ns.ecr.airflow_scheduler_repo,
                os.environ.get("IMAGE_TAG", "latest"),
            ),
            logging=ecs.AwsLogDriver(
                stream_prefix="ecs",
                log_group=ns.airflow_cluster.scheduler_log_group),
            environment={
                "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
                "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
                "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
                "AIRFLOW_EXECUTOR": "CeleryExecutor",
                "AIRFLOW_WEBSERVER_HOST": "webserver.airflow",
                "AIRFLOW_LOAD_EXAMPLES": "no",
                "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
                "REDIS_HOST": ns.redis.instance.attr_redis_endpoint_address,
                "BUCKET_NAME": bucket_name,
            },
            secrets={
                "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
                "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
                "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
            },
        )
        # Scheduler service
        ecs.FargateService(
            self,
            "schedulerService",
            service_name="scheduler_cdk",
            cluster=ns.airflow_cluster.instance,
            task_definition=scheduler_task,
            desired_count=1,
            security_group=ns.vpc.airflow_sg,
            assign_public_ip=False,
        )

        # Worker
        worker_task = ecs.FargateTaskDefinition(
            self,
            "worker-cdk",
            family="worker-cdk",
            cpu=1024,
            memory_limit_mib=3072,
            task_role=ns.airflow_cluster.airflow_task_role,
            execution_role=ns.airflow_cluster.task_execution_role,
        )

        worker_container = worker_task.add_container(
            "worker-cdk-container",
            image=ecs.ContainerImage.from_ecr_repository(
                ns.ecr.airflow_worker_repo,
                os.environ.get("IMAGE_TAG", "latest"),
            ),
            logging=ecs.AwsLogDriver(
                stream_prefix="ecs",
                log_group=ns.airflow_cluster.worker_log_group),
            environment={
                "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
                "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
                "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
                "AIRFLOW_EXECUTOR": "CeleryExecutor",
                "AIRFLOW_WEBSERVER_HOST": "webserver.airflow",
                "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
                "AIRFLOW_LOAD_EXAMPLES": "no",
                "REDIS_HOST": ns.redis.instance.attr_redis_endpoint_address,
                "BUCKET_NAME": bucket_name,
            },
            secrets={
                "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
                "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
                "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
            },
        )

        worker_port_mapping = ecs.PortMapping(container_port=8793,
                                              host_port=8793,
                                              protocol=ecs.Protocol.TCP)
        worker_container.add_port_mappings(worker_port_mapping)

        # Worker service
        ecs.FargateService(
            self,
            "workerService",
            service_name="worker_cdk",
            cluster=ns.airflow_cluster.instance,
            task_definition=worker_task,
            desired_count=1,
            security_group=ns.vpc.airflow_sg,
            assign_public_ip=False,
        )

        # ALB
        lb = elbv2.ApplicationLoadBalancer(
            self,
            "LB",
            vpc=ns.vpc.instance,
            internet_facing=True,
            security_group=ns.vpc.alb_sg,
        )

        listener = lb.add_listener("airflow-webserver-cdk-listener",
                                   port=80,
                                   open=True)

        webserver_hc = elbv2.HealthCheck(
            interval=core.Duration.seconds(60),
            path="/health",
            timeout=core.Duration.seconds(5),
        )

        # Attach ALB to ECS Service
        listener.add_targets(
            "airflow-webserver-cdk-default",
            port=80,
            targets=[webserver_service],
            health_check=webserver_hc,
        )
Пример #26
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: aws_ec2.Vpc,
                 ecs_cluster: aws_ecs.Cluster,
                 alb: elbv2.ApplicationLoadBalancer,
                 albTestListener: elbv2.ApplicationListener,
                 albProdListener: elbv2.ApplicationListener,
                 blueGroup: elbv2.ApplicationTargetGroup,
                 greenGroup: elbv2.ApplicationTargetGroup,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ECS_APP_NAME = "Nginx-app",
        ECS_DEPLOYMENT_GROUP_NAME = "NginxAppECSBlueGreen"
        ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes"
        ECS_TASKSET_TERMINATION_WAIT_TIME = 10
        ECS_TASK_FAMILY_NAME = "Nginx-microservice"
        ECS_APP_NAME = "Nginx-microservice"
        ECS_APP_LOG_GROUP_NAME = "/ecs/Nginx-microservice"
        DUMMY_TASK_FAMILY_NAME = "sample-Nginx-microservice"
        DUMMY_APP_NAME = "sample-Nginx-microservice"
        DUMMY_APP_LOG_GROUP_NAME = "/ecs/sample-Nginx-microservice"
        DUMMY_CONTAINER_IMAGE = "smuralee/nginx"

        # =============================================================================
        # ECR and CodeCommit repositories for the Blue/ Green deployment
        # =============================================================================

        # ECR repository for the docker images
        NginxecrRepo = aws_ecr.Repository(self,
                                          "NginxRepo",
                                          image_scan_on_push=True)

        NginxCodeCommitrepo = aws_codecommit.Repository(
            self,
            "NginxRepository",
            repository_name=ECS_APP_NAME,
            description="Oussama application hosted on NGINX")

        # =============================================================================
        #   CODE BUILD and ECS TASK ROLES for the Blue/ Green deployment
        # =============================================================================

        # IAM role for the Code Build project
        codeBuildServiceRole = aws_iam.Role(
            self,
            "codeBuildServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com'))

        inlinePolicyForCodeBuild = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:InitiateLayerUpload", "ecr:UploadLayerPart",
                "ecr:CompleteLayerUpload", "ecr:PutImage"
            ],
            resources=["*"])

        codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild)

        # ECS task role
        ecsTaskRole = aws_iam.Role(
            self,
            "ecsTaskRoleForWorkshop",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))

        # =============================================================================
        # CODE DEPLOY APPLICATION for the Blue/ Green deployment
        # =============================================================================

        # Creating the code deploy application
        codeDeployApplication = codedeploy.EcsApplication(
            self, "NginxAppCodeDeploy")

        # Creating the code deploy service role
        codeDeployServiceRole = aws_iam.Role(
            self,
            "codeDeployServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com'))
        codeDeployServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"))

        # IAM role for custom lambda function
        customLambdaServiceRole = aws_iam.Role(
            self,
            "codeDeployCustomLambda",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        inlinePolicyForLambda = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codedeploy:List*",
                "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup",
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup"
            ],
            resources=["*"])

        customLambdaServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        customLambdaServiceRole.add_to_policy(inlinePolicyForLambda)

        # Custom resource to create the deployment group
        createDeploymentGroupLambda = aws_lambda.Function(
            self,
            'createDeploymentGroupLambda',
            code=aws_lambda.Code.from_asset("custom_resources"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='create_deployment_group.handler',
            role=customLambdaServiceRole,
            description="Custom resource to create deployment group",
            memory_size=128,
            timeout=core.Duration.seconds(60))
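        # The deployment group is created through this Lambda-backed custom
        # resource because, at the time, CodeDeploy ECS deployment groups had
        # no L2 CDK construct (only CodeDeploy applications did).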

        # ================================================================================================
        # CloudWatch Alarms for 4XX errors
        blue4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": blueGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))

        blueGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "blue4xxErrors",
            alarm_name="Blue_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Blue target group",
            metric=blue4xxMetric,
            threshold=1,
            evaluation_periods=1)

        green4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": greenGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))
        greenGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "green4xxErrors",
            alarm_name="Green_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Green target group",
            metric=green4xxMetric,
            threshold=1,
            evaluation_periods=1)

        # ================================================================================================
        # DUMMY TASK DEFINITION for the initial service creation
        # This is required so that the ECS service exists before the CodeDeploy deployment group is created
        # ================================================================================================
        sampleTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "sampleTaskDefn",
            family=DUMMY_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        sampleContainerDefn = sampleTaskDefinition.add_container(
            "sampleAppContainer",
            image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "sampleAppLogGroup",
                log_group_name=DUMMY_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=DUMMY_APP_NAME),
            docker_labels={"name": DUMMY_APP_NAME})

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        sampleContainerDefn.add_port_mappings(port_mapping)

        # ================================================================================================
        # ECS task definition using ECR image
        # Will be used by the CODE DEPLOY for Blue/Green deployment
        # ================================================================================================
        NginxTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "appTaskDefn",
            family=ECS_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        NginxcontainerDefinition = NginxTaskDefinition.add_container(
            "NginxAppContainer",
            image=aws_ecs.ContainerImage.from_ecr_repository(
                NginxecrRepo, "latest"),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "NginxAppLogGroup",
                log_group_name=ECS_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=ECS_APP_NAME),
            docker_labels={"name": ECS_APP_NAME})
        NginxcontainerDefinition.add_port_mappings(port_mapping)

        # =============================================================================
        # ECS SERVICE for the Blue/Green deployment
        # =============================================================================
        NginxAppService = aws_ecs.FargateService(
            self,
            "NginxAppService",
            cluster=ecs_cluster,
            task_definition=NginxTaskDefinition,
            health_check_grace_period=core.Duration.seconds(10),
            desired_count=3,
            deployment_controller={
                "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY
            },
            service_name=ECS_APP_NAME)

        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(80))
        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(8080))
        NginxAppService.attach_to_application_target_group(blueGroup)

        # =============================================================================
        # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/Green deployment
        # =============================================================================

        core.CustomResource(
            self,
            'customEcsDeploymentGroup',
            service_token=createDeploymentGroupLambda.function_arn,
            properties={
                "ApplicationName": codeDeployApplication.application_name,
                "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME,
                "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME,
                "ServiceRoleArn": codeDeployServiceRole.role_arn,
                "BlueTargetGroup": blueGroup.target_group_name,
                "GreenTargetGroup": greenGroup.target_group_name,
                "ProdListenerArn": albProdListener.listener_arn,
                "TestListenerArn": albTestListener.listener_arn,
                "EcsClusterName": ecs_cluster.cluster_name,
                "EcsServiceName": NginxAppService.service_name,
                "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME,
                "BlueGroupAlarm": blueGroupAlarm.alarm_name,
                "GreenGroupAlarm": greenGroupAlarm.alarm_name,
            })
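
        # The sketch below is a hedged guess at what the handler in
        # custom_resources/create_deployment_group.py might contain; the asset
        # itself is not shown in this example. It maps the custom resource
        # properties above onto the CodeDeploy API. The file layout and the use
        # of cfnresponse (vendored with the asset; it is only auto-provided for
        # inline functions) are assumptions.

# custom_resources/create_deployment_group.py -- illustrative sketch only
import boto3
import cfnresponse  # assumed vendored next to the handler

codedeploy = boto3.client('codedeploy')

def handler(event, context):
    props = event['ResourceProperties']
    try:
        if event['RequestType'] == 'Create':
            codedeploy.create_deployment_group(
                applicationName=props['ApplicationName'],
                deploymentGroupName=props['DeploymentGroupName'],
                deploymentConfigName=props['DeploymentConfigName'],
                serviceRoleArn=props['ServiceRoleArn'],
                deploymentStyle={'deploymentType': 'BLUE_GREEN',
                                 'deploymentOption': 'WITH_TRAFFIC_CONTROL'},
                blueGreenDeploymentConfiguration={
                    'terminateBlueInstancesOnDeploymentSuccess': {
                        'action': 'TERMINATE',
                        'terminationWaitTimeInMinutes': int(props['TerminationWaitTime'])},
                    'deploymentReadyOption': {'actionOnTimeout': 'CONTINUE_DEPLOYMENT'}},
                # Roll back automatically when either target group alarm fires
                alarmConfiguration={'enabled': True,
                                    'alarms': [{'name': props['BlueGroupAlarm']},
                                               {'name': props['GreenGroupAlarm']}]},
                autoRollbackConfiguration={'enabled': True,
                                           'events': ['DEPLOYMENT_FAILURE']},
                ecsServices=[{'serviceName': props['EcsServiceName'],
                              'clusterName': props['EcsClusterName']}],
                loadBalancerInfo={'targetGroupPairInfoList': [{
                    'targetGroups': [{'name': props['BlueTargetGroup']},
                                     {'name': props['GreenTargetGroup']}],
                    'prodTrafficRoute': {'listenerArns': [props['ProdListenerArn']]},
                    'testTrafficRoute': {'listenerArns': [props['TestListenerArn']]}}]})
        elif event['RequestType'] == 'Delete':
            codedeploy.delete_deployment_group(
                applicationName=props['ApplicationName'],
                deploymentGroupName=props['DeploymentGroupName'])
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception as exc:
        cfnresponse.send(event, context, cfnresponse.FAILED, {'Error': str(exc)})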

        ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
            self,
            "ecsDeploymentGroup",
            application=codeDeployApplication,
            deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME,
            deployment_config=codedeploy.EcsDeploymentConfig.
            from_ecs_deployment_config_name(self, "ecsDeploymentConfig",
                                            ECS_DEPLOYMENT_CONFIG_NAME))

        # =============================================================================
        # CODE BUILD PROJECT for the Blue/Green deployment
        # =============================================================================

        # Creating the code build project
        NginxAppcodebuild = aws_codebuild.Project(
            self,
            "NginxAppCodeBuild",
            role=codeBuildServiceRole,
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0,
                compute_type=aws_codebuild.ComputeType.SMALL,
                privileged=True,
                environment_variables={
                    'REPOSITORY_URI': {
                        'value':
                        NginxecrRepo.repository_uri,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_EXECUTION_ARN': {
                        'value':
                        ecsTaskRole.role_arn,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_FAMILY': {
                        'value':
                        ECS_TASK_FAMILY_NAME,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    }
                }),
            source=aws_codebuild.Source.code_commit(
                repository=NginxCodeCommitrepo))

        # =============================================================================
        # CODE PIPELINE for Blue/Green ECS deployment
        # =============================================================================

        codePipelineServiceRole = aws_iam.Role(
            self,
            "codePipelineServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com'))

        inlinePolicyForCodePipeline = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codecommit:Get*",
                "codecommit:List*", "codecommit:GitPull",
                "codecommit:UploadArchive", "codecommit:CancelUploadArchive",
                "codebuild:BatchGetBuilds", "codebuild:StartBuild",
                "codedeploy:CreateDeployment", "codedeploy:Get*",
                "codedeploy:RegisterApplicationRevision", "s3:Get*",
                "s3:List*", "s3:PutObject"
            ],
            resources=["*"])

        codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline)

        sourceArtifact = codepipeline.Artifact('sourceArtifact')
        buildArtifact = codepipeline.Artifact('buildArtifact')

        # S3 bucket for storing the code pipeline artifacts
        NginxAppArtifactsBucket = s3.Bucket(
            self,
            "NginxAppArtifactsBucket",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # S3 bucket policy for the code pipeline artifacts
        denyUnEncryptedObjectUploads = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:PutObject"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "aws:kms"
                }
            })

        denyInsecureConnections = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={"Bool": {
                "aws:SecureTransport": "false"
            }})

        NginxAppArtifactsBucket.add_to_resource_policy(
            denyUnEncryptedObjectUploads)
        NginxAppArtifactsBucket.add_to_resource_policy(denyInsecureConnections)

        # Code Pipeline - CloudWatch trigger event is created by CDK
        codepipeline.Pipeline(
            self,
            "ecsBlueGreen",
            role=codePipelineServiceRole,
            artifact_bucket=NginxAppArtifactsBucket,
            stages=[
                codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.CodeCommitSourceAction(
                            action_name='Source',
                            repository=NginxCodeCommitrepo,
                            output=sourceArtifact,
                        )
                    ]),
                codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='Build',
                            project=NginxAppcodebuild,
                            input=sourceArtifact,
                            outputs=[buildArtifact])
                    ]),
                codepipeline.StageProps(
                    stage_name='Deploy',
                    actions=[
                        aws_codepipeline_actions.CodeDeployEcsDeployAction(
                            action_name='Deploy',
                            deployment_group=ecsDeploymentGroup,
                            app_spec_template_input=buildArtifact,
                            task_definition_template_input=buildArtifact,
                        )
                    ])
            ])

        # =============================================================================
        # Export the outputs
        # =============================================================================
        core.CfnOutput(self,
                       "ecsBlueGreenCodeRepo",
                       description="Demo app code commit repository",
                       export_name="ecsBlueGreenDemoAppRepo",
                       value=NginxCodeCommitrepo.repository_clone_url_http)

        core.CfnOutput(self,
                       "ecsBlueGreenLBDns",
                       description="Load balancer DNS",
                       export_name="ecsBlueGreenLBDns",
                       value=alb.load_balancer_dns_name)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # If left unchecked this pattern could "fan out" on the transform and load
        # lambdas to the point that it consumes all resources on the account. This is
        # why we are limiting concurrency to 2 on all 3 lambdas. Feel free to raise this.
        lambda_throttle_size = 2

        ####
        # DynamoDB Table
        # This is where our transformed data ends up
        ####
        table = dynamo_db.Table(self,
                                "TransformedData",
                                partition_key=dynamo_db.Attribute(
                                    name="id",
                                    type=dynamo_db.AttributeType.STRING))

        ####
        # S3 Landing Bucket
        # This is where the user uploads the file to be transformed
        ####
        bucket = s3.Bucket(self, "LandingBucket")

        ####
        # Queue that listens for S3 Bucket events
        ####
        queue = sqs.Queue(self,
                          'newObjectInLandingBucketEventQueue',
                          visibility_timeout=core.Duration.seconds(300))

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      s3n.SqsDestination(queue))

        # EventBridge Permissions
        event_bridge_put_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=['*'],
            actions=['events:PutEvents'])

        ####
        # Fargate ECS Task Creation to pull data from S3
        #
        # Fargate is used here because if you had a seriously large file,
        # you could stream the data to fargate for as long as needed before
        # putting the data onto eventbridge or up the memory/storage to
        # download the whole file. Lambda has limitations on runtime and
        # memory/storage
        ####
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        logging = ecs.AwsLogDriver(stream_prefix='TheEventBridgeETL',
                                   log_retention=logs.RetentionDays.ONE_WEEK)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        task_definition = ecs.TaskDefinition(
            self,
            'FargateTaskDefinition',
            memory_mib="512",
            cpu="256",
            compatibility=ecs.Compatibility.FARGATE)

        # We need to give our fargate container permission to put events on our EventBridge
        task_definition.add_to_task_role_policy(event_bridge_put_policy)
        # Grant fargate container access to the object that was uploaded to s3
        bucket.grant_read(task_definition.task_role)

        container = task_definition.add_container(
            'AppContainer',
            image=ecs.ContainerImage.from_asset(
                'container/s3DataExtractionTask'),
            logging=logging,
            environment={
                'S3_BUCKET_NAME': bucket.bucket_name,
                'S3_OBJECT_KEY': ''
            })

        ####
        # Lambdas
        #
        # These are used for 4 phases:
        #
        # Extract    - kicks off the ECS Fargate task to download the data and splinter it into EventBridge events
        # Transform  - takes the two comma separated strings and produces a JSON object
        # Load       - inserts the data into DynamoDB
        # Observe    - a Lambda that subscribes to all events and logs them centrally
        # (a sketch of the event shape each phase emits follows this block)
        ####
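
        # The sketch below is illustrative only: it shows the event shape each
        # phase would have to put on the default bus for the rules defined
        # later in this stack to match. The helper name and the payload fields
        # are assumptions; only the source, detail-type and status values are
        # taken from the rule patterns below.

# event_flow_sketch.py -- hypothetical, for illustration only
import json
import boto3

eventbridge = boto3.client('events')

def emit(detail_type: str, status: str, payload: dict) -> None:
    """Put one event on the default bus in the shape the ETL rules expect."""
    eventbridge.put_events(Entries=[{
        'Source': 'cdkpatterns.the-eventbridge-etl',  # matched by every rule
        'DetailType': detail_type,                    # routes to one phase
        'Detail': json.dumps({'status': status, 'data': payload}),
    }])

# extract (Fargate task):  emit('s3RecordExtraction', 'extracted', {...})
# transform (lambda):      emit('transform', 'transformed', {...})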

        subnet_ids = [subnet.subnet_id for subnet in vpc.private_subnets]

        ####
        # Extract
        # defines an AWS Lambda resource to trigger our fargate ecs task
        ####
        extract_lambda = _lambda.Function(
            self,
            "extractLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="s3SqsEventConsumer.handler",
            code=_lambda.Code.from_asset("lambdas/extract"),
            reserved_concurrent_executions=lambda_throttle_size,
            environment={
                "CLUSTER_NAME": cluster.cluster_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
                "SUBNETS": json.dumps(subnet_ids),
                "CONTAINER_NAME": container.container_name
            })
        queue.grant_consume_messages(extract_lambda)
        extract_lambda.add_event_source(_event.SqsEventSource(queue=queue))
        extract_lambda.add_to_role_policy(event_bridge_put_policy)

        run_task_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[task_definition.task_definition_arn],
            actions=['ecs:RunTask'])
        extract_lambda.add_to_role_policy(run_task_policy_statement)

        task_execution_role_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[
                task_definition.obtain_execution_role().role_arn,
                task_definition.task_role.role_arn
            ],
            actions=['iam:PassRole'])
        extract_lambda.add_to_role_policy(task_execution_role_policy_statement)
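
        # A hedged sketch of the extract handler's logic. The real handler in
        # lambdas/extract is Node.js (s3SqsEventConsumer.handler); this Python
        # rendering is an illustration of the same flow using the environment
        # variables wired up above.

# extract_sketch.py -- hypothetical Python port, for illustration only
import json
import os
import boto3

ecs_client = boto3.client('ecs')

def handler(event, context):
    # Each SQS record wraps one S3 event notification
    for record in event['Records']:
        s3_event = json.loads(record['body'])
        for s3_record in s3_event.get('Records', []):
            object_key = s3_record['s3']['object']['key']
            # Run the Fargate task, overriding S3_OBJECT_KEY for this object
            ecs_client.run_task(
                cluster=os.environ['CLUSTER_NAME'],
                taskDefinition=os.environ['TASK_DEFINITION'],
                launchType='FARGATE',
                networkConfiguration={'awsvpcConfiguration': {
                    'subnets': json.loads(os.environ['SUBNETS'])}},
                overrides={'containerOverrides': [{
                    'name': os.environ['CONTAINER_NAME'],
                    'environment': [{'name': 'S3_OBJECT_KEY',
                                     'value': object_key}]}]})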

        ####
        # Transform
        # defines a lambda to transform the data that was extracted from s3
        ####

        transform_lambda = _lambda.Function(
            self,
            "TransformLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="transform.handler",
            code=_lambda.Code.from_asset("lambdas/transform"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))
        transform_lambda.add_to_role_policy(event_bridge_put_policy)

        # Create EventBridge rule to route extraction events
        transform_rule = events.Rule(
            self,
            'transformRule',
            description='Data extracted from S3, needs transformation',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['s3RecordExtraction'],
                detail={"status": ["extracted"]}))
        transform_rule.add_target(
            targets.LambdaFunction(handler=transform_lambda))

        ####
        # Load
        # load the transformed data in dynamodb
        ####

        load_lambda = _lambda.Function(
            self,
            "LoadLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="load.handler",
            code=_lambda.Code.from_asset("lambdas/load"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3),
            environment={"TABLE_NAME": table.table_name})
        load_lambda.add_to_role_policy(event_bridge_put_policy)
        table.grant_read_write_data(load_lambda)

        load_rule = events.Rule(
            self,
            'loadRule',
            description='Data transformed, needs loading into DynamoDB',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['transform'],
                detail={"status": ["transformed"]}))
        load_rule.add_target(targets.LambdaFunction(handler=load_lambda))

        ####
        # Observe
        # Watch for all cdkpatterns.the-eventbridge-etl events and log them centrally
        ####

        observe_lambda = _lambda.Function(
            self,
            "ObserveLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="observe.handler",
            code=_lambda.Code.from_asset("lambdas/observe"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))

        observe_rule = events.Rule(
            self,
            'observeRule',
            description='all events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl']))

        observe_rule.add_target(targets.LambdaFunction(handler=observe_lambda))
Example #28
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ******* Database table
        audiobooksDB = aws_dynamodb.Table(
            self,
            "audiobooksDB",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING),
            read_capacity=2,
            write_capacity=2,
            billing_mode=aws_dynamodb.BillingMode.PROVISIONED)

        # ******* Lambda functions
        book_upload_lambda_function = aws_lambda.Function(
            self,
            "HandleBookUploadLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_book_upload'))
        polly_audio_lambda_function = aws_lambda.Function(
            self,
            "HandlePollyAudioLambda",
            handler='app.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(
                '../Functions/handlers/handle_polly_audio'),
            timeout=core.Duration.seconds(120))

        # ******* S3 upload buckets
        BookUploadBucket = aws_s3.Bucket(self, "BookUploadBucket")
        AudioUploadBucket = aws_s3.Bucket(self, "AudioUploadBucket")
        VideoUploadBucket = aws_s3.Bucket(self, "VideoUploadBucket")
        ImageUploadBucket = aws_s3.Bucket(self, "ImageUploadBucket")

        # ******* Create S3 event source
        book_upload_lambda_function.add_event_source(
            S3EventSource(BookUploadBucket,
                          events=[aws_s3.EventType.OBJECT_CREATED],
                          filters=[{
                              "suffix": '.txt'
                          }]))
        # ******* Create SNS topic
        PollySNSTopic = aws_sns.Topic(self, "PollySNSTopic")
        PollySNSTopic.add_subscription(
            aws_sns_subscriptions.LambdaSubscription(
                polly_audio_lambda_function))

        # ******* Book function environment variables
        book_upload_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        book_upload_lambda_function.add_environment(
            "AUDIO_S3_BUCKET", AudioUploadBucket.bucket_name)
        book_upload_lambda_function.add_environment("SNS_TOPIC",
                                                    PollySNSTopic.topic_arn)

        # ******* Book function permissions
        audiobooksDB.grant_write_data(book_upload_lambda_function)
        BookUploadBucket.grant_read(book_upload_lambda_function)
        AudioUploadBucket.grant_write(book_upload_lambda_function)
        PollySNSTopic.grant_publish(book_upload_lambda_function)
        book_upload_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["polly:*"], resources=["*"]))

        # ******* Fargate container permissions
        role = aws_iam.Role(
            self,
            "FargateContainerRole",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:PutObject"],
                resources=[VideoUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[AudioUploadBucket.bucket_arn + "/*"]))
        role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[ImageUploadBucket.bucket_arn + "/*"]))

        # ******* Fargate container
        vpc = aws_ec2.Vpc(self, "CdkFargateVpc", max_azs=2)
        cluster = aws_ecs.Cluster(self, 'FargateCluster', vpc=vpc)
        image = aws_ecs.ContainerImage.from_asset(
            "../Functions/ECSContainerFiles")
        task_definition = aws_ecs.FargateTaskDefinition(
            self,
            "FargateContainerTaskDefinition",
            execution_role=role,
            task_role=role,
            cpu=1024,
            memory_limit_mib=3072)

        port_mapping = aws_ecs.PortMapping(container_port=80, host_port=80)
        container = task_definition.add_container(
            "Container",
            image=image,
            logging=aws_ecs.AwsLogDriver(
                stream_prefix="videoProcessingContainer"))
        container.add_port_mappings(port_mapping)

        # ******* Audio function environment variables
        polly_audio_lambda_function.add_environment(
            "VIDEO_S3_BUCKET", VideoUploadBucket.bucket_name)
        polly_audio_lambda_function.add_environment(
            "TASK_DEFINITION_ARN", task_definition.task_definition_arn)
        polly_audio_lambda_function.add_environment("CLUSTER_ARN",
                                                    cluster.cluster_arn)
        polly_audio_lambda_function.add_environment("TABLE_NAME",
                                                    audiobooksDB.table_name)
        polly_audio_lambda_function.add_environment("CONTAINER_NAME",
                                                    container.container_name)
        polly_audio_lambda_function.add_environment("VPC_ID", str(vpc.vpc_id))

        # ******* Audio function permissions
        audiobooksDB.grant_read_write_data(polly_audio_lambda_function)
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ecs:RunTask"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["iam:PassRole"], resources=["*"]))
        polly_audio_lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["ec2:DescribeSubnets"],
                                    resources=["*"]))
Example #29
    def configure_container(self, appname: str, props: Props, tgroups: dict):
        virtnodes = {}
        if appname == 'gateway' or appname == 'tcpecho':
            colors = ['']
        else:
            colors = props.colors

        for color in colors:
            fullname = color.upper()+appname

            td = ecs.FargateTaskDefinition(self, fullname+'_task', cpu=256, memory_limit_mib=512,
                                           execution_role=props.taskexeciamrole, task_role=props.taskiamrole)

            env = {}
            if appname != 'tcpecho':
                td.node.find_child('Resource').add_property_override('proxyConfiguration', {
                    'type': 'APPMESH',
                    'containerName': 'envoy',
                    'proxyConfigurationProperties': [
                        {'name': 'IgnoredUID',
                         'value': '1337'},
                        {'name': 'ProxyIngressPort',
                         'value': '15000'},
                        {'name': 'ProxyEgressPort',
                         'value': '15001'},
                        {'name': 'AppPorts',
                         'value': '9080'},
                        {'name': 'EgressIgnoredIPs',
                         'value': '169.254.170.2,169.254.169.254'}
                    ]
                })
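
                # Later CDK releases expose the same proxy settings through a
                # typed API, which avoids the raw property override above. A
                # hedged sketch (kwarg names may differ slightly by version):
                #
                # td = ecs.FargateTaskDefinition(
                #     self, fullname + '_task', cpu=256, memory_limit_mib=512,
                #     execution_role=props.taskexeciamrole, task_role=props.taskiamrole,
                #     proxy_configuration=ecs.AppMeshProxyConfiguration(
                #         container_name='envoy',
                #         properties=ecs.AppMeshProxyConfigurationProps(
                #             ignored_uid=1337,
                #             proxy_ingress_port=15000,
                #             proxy_egress_port=15001,
                #             app_ports=[9080],
                #             egress_ignored_i_ps=['169.254.170.2', '169.254.169.254'])))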

                env = {
                    'SERVER_PORT': '9080'
                }

            if appname != 'tcpecho':
                contimage = ecs.EcrImage.from_ecr_repository(props.repos[appname], tag='latest')
            else:
                contimage = ecs.ContainerImage.from_registry('cjimti/go-echo')

            port = 9080
            if appname == 'gateway':
                env['COLOR_TELLER_ENDPOINT'] = props.repos['colorteller'].repository_name +\
                                               '.'+props.cluster.default_cloud_map_namespace.namespace_name+':9080'
                env['TCP_ECHO_ENDPOINT'] = 'tcpecho.'+props.cluster.default_cloud_map_namespace.namespace_name+':2701'
            elif appname == 'colorteller':
                env['COLOR'] = color
            else:
                env = {'TCP_PORT': '2701', 'NODE_NAME': 'mesh/' + props.mesh.mesh_name + '/virtualNode/tcpecho--vn'}
                port = 2701

            cont = ecs.ContainerDefinition(self, fullname+'-container', task_definition=td, essential=True,
                                           logging=ecs.AwsLogDriver(stream_prefix=fullname),
                                           image=contimage, environment=env)
            # cont.add_port_mappings(ecs.PortMapping(container_port=port, host_port=port,
            #                                        protocol=ecs.Protocol.TCP))

            # X-Ray and Envoy definition ----------------------------------------------------------------------------
            if appname != 'tcpecho':
                xrayimage = ecs.ContainerImage.from_registry('amazon/aws-xray-daemon')

                xtask = td.add_container('xray-daemon', image=xrayimage, cpu=32, memory_reservation_mib=256,
                                         logging=ecs.AwsLogDriver(stream_prefix=fullname+'-xray'),
                                         essential=True, user='******')
                xtask.add_port_mappings(ecs.PortMapping(container_port=2000, host_port=2000,
                                                        protocol=ecs.Protocol.UDP))

                # Envoy definition ----------------------------------------------------------------------------------
                ENVOY_IMAGE_LOC = '111345817488.dkr.ecr.us-west-2.amazonaws.com/aws-appmesh-envoy:v1.9.1.0-prod'
                envoyimage = ecs.EcrImage.from_registry(ENVOY_IMAGE_LOC)

                envoyenv = {
                    'APPMESH_VIRTUAL_NODE_NAME': 'mesh/'+props.mesh.mesh_name+'/virtualNode/'+appname+'-'+color+'-vn',
                    'ENABLE_ENVOY_XRAY_TRACING': '1',
                    'ENABLE_ENVOY_STATS_TAGS': '1',
                    'ENVOY_LOG_LEVEL': 'debug'
                }

                if appname == 'gateway':
                    envoyenv['APPMESH_VIRTUAL_NODE_NAME'] = 'mesh/'+props.mesh.mesh_name+'/virtualNode/gateway--vn'

                envoy_hc = ecs.HealthCheck(
                    command=['CMD-SHELL',
                             'curl -s http://localhost:9901/server_info | grep state | grep -q LIVE'],
                    interval=core.Duration.seconds(5),
                    timeout=core.Duration.seconds(2),
                    retries=3)

                etask = td.add_container('envoy', image=envoyimage, user='******', essential=True, environment=envoyenv,
                                         logging=ecs.AwsLogDriver(stream_prefix=fullname+'-envoy'),
                                         health_check=envoy_hc)
                etask.add_port_mappings(
                    ecs.PortMapping(container_port=9901, host_port=9901, protocol=ecs.Protocol.TCP),
                    ecs.PortMapping(container_port=15000, host_port=15000, protocol=ecs.Protocol.TCP),
                    ecs.PortMapping(container_port=15001, host_port=15001, protocol=ecs.Protocol.TCP))

            # Prometheus & Grafana definition for Gateway ---------------------------------------------------------
            if appname == 'gateway':
                prometheusimage = ecs.EcrImage.from_ecr_repository(props.repos['prometheus'], tag='latest')

                ptask = td.add_container('prometheus', image=prometheusimage, essential=True,
                                         logging=ecs.AwsLogDriver(stream_prefix=appname + '-prometheus'))
                ptask.add_port_mappings(ecs.PortMapping(container_port=9090, host_port=9090))

                grafanaimage = ecs.ContainerImage.from_registry('grafana/grafana:latest')
                gtask = td.add_container('grafana', image=grafanaimage, essential=True,
                                         logging=ecs.AwsLogDriver(stream_prefix=appname + '-grafana'))
                gtask.add_port_mappings(ecs.PortMapping(container_port=3000, host_port=3000))

            if color == 'white':
                disco_name = 'colorteller'
            elif appname != 'gateway' and appname != 'tcpecho':
                disco_name = 'colorteller-'+color
            elif appname == 'gateway':
                disco_name = 'colorgateway'
            else:
                disco_name = 'tcpecho'

            disco = ecs.CloudMapOptions(dns_record_type=sdisc.DnsRecordType.A,
                                        dns_ttl=core.Duration.seconds(3000),
                                        name=disco_name)

            svc = ecs.FargateService(self, fullname+'Service', max_healthy_percent=200, min_healthy_percent=100,
                                     desired_count=1, task_definition=td, cluster=props.cluster,
                                     vpc_subnets=ec2.SubnetSelection(subnets=props.vpc.private_subnets),
                                     security_group=props.csg, cloud_map_options=disco)

            if appname == 'gateway':
                # NOTE: pokes a private attribute; the CDK version this snippet
                # targets has no public API to attach an extra container to an
                # externally created target group.
                svc._load_balancers = [{'containerName': 'grafana', 'containerPort': 3000,
                                        'targetGroupArn': tgroups['grafana'].target_group_arn}]

            path = '/ping' if appname != 'tcpecho' else '/'
            spec = {
                'listeners': [{
                    'portMapping': {'port': port, 'protocol': 'http'},
                    'healthCheck': {'protocol': 'http', 'path': path,
                                    'healthyThreshold': 2, 'unhealthyThreshold': 2,
                                    'timeoutMillis': 2000, 'intervalMillis': 5000}}],
                'serviceDiscovery': {
                    'dns': {'hostname': svc.cloud_map_service.service_name+'.'+
                                        props.cluster.default_cloud_map_namespace.namespace_name}
                }
            }

            if appname == 'gateway':
                spec['backends'] = [
                    {'virtualService': {'virtualServiceName':
                                            'colorteller.'+props.cluster.default_cloud_map_namespace.namespace_name}},
                    {'virtualService': {'virtualServiceName':
                                            'tcpecho.'+props.cluster.default_cloud_map_namespace.namespace_name}},
                ]

            # Create AppMesh virtual nodes ------------------------------------------------------------------------
            vn = appmesh.CfnVirtualNode(self, fullname + 'VirtualNode', mesh_name=props.mesh.mesh_name,
                                        virtual_node_name=appname + '-' + color + '-vn',
                                        spec=spec)

            virtnodes[fullname] = vn

        return virtnodes
    def __init__(
        self, 
        scope: core.Construct, 
        id: str, 
        keycloak_domain: str,
        vpc: ec2.IVpc = None, 
        cluster: ecs.ICluster = None, 
        load_balancer: elbv2.IApplicationLoadBalancer = None, 
        log_group: logs.ILogGroup = None,
        keycloak_database_name: str = 'keycloak',
        keycloak_database_user: str = 'admin',
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        keycloak_task_role = iam.Role(
            self, 'KeycloakTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )

        keycloak_database_secret = secretsmanager.Secret(
            self, 'KeycloakDatabaseSecret',
            description='Keycloak Database Password',
            generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True)
        )

        keycloak_database_cluster = rds.DatabaseCluster(
            self, 'KeycloakDatabaseCluster',
            engine=rds.DatabaseClusterEngine.AURORA,
            instance_props=rds.InstanceProps(
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3, 
                    instance_size=ec2.InstanceSize.SMALL
                ),
                vpc=vpc,
            ),
            master_user=rds.Login(
                username=keycloak_database_user,
                password=keycloak_database_secret.secret_value,
            ),
            instances=1,
            default_database_name=keycloak_database_name,
            removal_policy=core.RemovalPolicy.DESTROY,
        )


        keycloak_hosted_zone = route53.HostedZone.from_lookup(
            self, 'KeycloakHostedZone',
            domain_name=keycloak_domain
        )

        keycloak_certificate = acm.DnsValidatedCertificate(
            self, 'KeycloakCertificate',
            hosted_zone=keycloak_hosted_zone,
            domain_name='keycloak.' + keycloak_domain
        )

        keycloak_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, 'KeycloakLoadBalancedFargateService',
            load_balancer=load_balancer,
            cluster=cluster,

            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset('keycloak'),
                container_port=8080,
                enable_logging=True,
                task_role=keycloak_task_role,

                log_driver=ecs.AwsLogDriver(
                    stream_prefix='keycloak',
                    log_group=log_group,
                ),

                secrets={
                    'DB_PASSWORD': ecs.Secret.from_secrets_manager(keycloak_database_secret),
                },
                environment={
                    'DB_VENDOR': 'mysql',
                    'DB_USER': keycloak_database_user,
                    'DB_ADDR': keycloak_database_cluster.cluster_endpoint.hostname,
                    'DB_DATABASE': keycloak_database_name,
                    # 'KEYCLOAK_LOGLEVEL': 'DEBUG',
                    'PROXY_ADDRESS_FORWARDING': 'true',
                },
            ),

            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name='keycloak.' + keycloak_domain,
            domain_zone=keycloak_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        keycloak_service.target_group.enable_cookie_stickiness(core.Duration.hours(24))
        keycloak_service.target_group.configure_health_check(
            port='8080',
            path='/auth/realms/master/.well-known/openid-configuration',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        keycloak_service.listener.add_certificates(
            'KeycloakListenerCertificate',
            certificates=[keycloak_certificate]
        )

        keycloak_database_cluster.connections.allow_default_port_from(keycloak_service.service, 'From Keycloak Fargate Service')
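
A hypothetical wiring of the stack above ("KeycloakStack" is a placeholder name for the class this __init__ belongs to; the account, region and domain are placeholders too). HostedZone.from_lookup needs a concrete environment at synth time, and the vpc/cluster parameters are expected to be created elsewhere:

# app.py -- illustrative sketch only
from aws_cdk import core
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs

app = core.App()
env = core.Environment(account='111111111111', region='eu-west-1')

# Shared network resources, supplied to the Keycloak stack
network = core.Stack(app, 'KeycloakNetwork', env=env)
vpc = ec2.Vpc(network, 'Vpc', max_azs=2)
cluster = ecs.Cluster(network, 'Cluster', vpc=vpc)

KeycloakStack(
    app, 'Keycloak',
    keycloak_domain='example.com',  # placeholder domain
    vpc=vpc,
    cluster=cluster,
    env=env)

app.synth()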