Example #1
    def __create_pull_service(self, service_name: str, ctx: object):
        ctx_srv = getattr(ctx.inbound.services.pull, service_name)

        ecs_task_role = self.__create_default_task_role(service_name)

        log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                            stream_prefix=service_name)

        # create a Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task_definition",
            cpu=ctx_srv.size.cpu,
            memory_limit_mib=ctx_srv.size.ram,
            execution_role=self.ecs_exec_role,
            task_role=ecs_task_role,
        )

        # create a container definition and associate with the Fargate task
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope=self,
            id=f"{service_name}_container_definition",
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                self.ecr_repository, "latest"),
            logging=log_driver,
            **container_vars)
        security_group = ec2.SecurityGroup(scope=self,
                                           id=f"{service_name}_sg",
                                           vpc=self.vpc)
        service = ecs.FargateService(scope=self,
                                     id=f"{service_name}_service",
                                     task_definition=task_definition,
                                     cluster=self.cluster,
                                     desired_count=getattr(
                                         ctx_srv, "desired_count",
                                         ctx.default_desired_count),
                                     service_name=service_name,
                                     security_group=security_group)

        scaling = service.auto_scale_task_count(
            max_capacity=ctx_srv.scaling.max_capacity,
            min_capacity=ctx_srv.scaling.min_capacity)

        scaling.scale_on_cpu_utilization(
            id="cpu_scaling",
            target_utilization_percent=ctx_srv.scaling.
            target_utilization_percent,
            scale_in_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_out_cooldown_seconds))
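Example #1 calls two private helpers, __create_default_task_role and __get_container_vars, that are not part of the snippet. A minimal sketch of what they might look like, modelled on the patterns visible in Example #3 below; the method bodies and the ctx_srv.variables shape are assumptions, not the project's actual implementation:

    def __create_default_task_role(self, service_name: str) -> iam.Role:
        # Sketch: a role the ECS tasks can assume; callers attach extra grants as needed.
        return iam.Role(
            scope=self,
            id=f"{service_name}_task_role",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

    def __get_container_vars(self, service_name: str, ctx: object, ctx_srv: object) -> dict:
        # Sketch: surface per-service settings as container environment variables;
        # the returned keys must be valid ecs.ContainerDefinition keyword arguments.
        environment = {"SERVICE_NAME": service_name}
        for key, value in getattr(ctx_srv, "variables", {}).items():
            environment[key.upper()] = str(value)
        return {"environment": environment}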
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the VPC for the honeypot(s), default is all AZs in region
        vpc = ec2.Vpc(self, "HoneypotVpc", max_azs=3)

        # Create the ECS cluster where fargate can deploy the Docker containers
        cluster = ecs.Cluster(self, "HoneypotCluster", vpc=vpc)

        # Define task definition for Fargate Service
        task_definition = ecs.FargateTaskDefinition(self,
                                                    "HoneypotTasks",
                                                    cpu=256,
                                                    memory_limit_mib=512)

        # Container definition
        container_definition = ecs.ContainerDefinition(
            self,
            "HoneypotContainerDefinition",
            image=ecs.ContainerImage.from_registry("statixs/cowrie"),
            #image=ecs.ContainerImage.from_asset(directory = "docker"),
            task_definition=task_definition,
            stop_timeout=core.Duration.seconds(2),
            logging=ecs.AwsLogDriver(
                stream_prefix="cowrie",
                log_retention=logs.RetentionDays.ONE_WEEK,
            ),
        )

        # ECS Security Group definition
        sg_ssh = ec2.SecurityGroup(self,
                                   "honeypot-sg-ssh",
                                   vpc=vpc,
                                   description="Allow SSH to the honeypot")
        sg_ssh.add_ingress_rule(ec2.Peer.ipv4("0.0.0.0/0"), ec2.Port.tcp(22))

        # Fargate service definition
        fargate_service = ecs.FargateService(
            self,
            "HoneypotFargate",
            cluster=cluster,
            assign_public_ip=True,
            desired_count=1,
            security_group=sg_ssh,
            task_definition=task_definition,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4)
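A stack like this honeypot is synthesized from a CDK app entry point. A minimal sketch, assuming the class above is named HoneypotStack and lives in honeypot/stack.py (both names are assumptions):

    # app.py -- hypothetical entry point for the honeypot stack shown above
    from aws_cdk import core
    from honeypot.stack import HoneypotStack  # assumed module and class name

    app = core.App()
    HoneypotStack(app, "honeypot-stack")
    app.synth()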
Example #3
File: stack.py Project: vtluu/telemetry
    def __init__(self, scope: core.Construct, id: str, ctx: object, ecr_repository: ecr.Repository, kinesis_stream: ks.Stream, state_table: ddb.Table, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.ecr_repository = ecr_repository
        self.kinesis_stream = kinesis_stream
        self.state_table = state_table
        service_name = "processor"
        ctx_srv = getattr(ctx.outbound.services.pull, service_name)

        self.vpc = ec2.Vpc.from_vpc_attributes(
            self, "VPC",
            **ctx.vpc_props.dict()
        )

        # CloudWatch Logs Group
        self.log_group = cwl.LogGroup(
            scope = self,
            id = "logs"
        )

        # Create a new ECS cluster for our services
        self.cluster = ecs.Cluster(
            self,
            vpc = self.vpc,
            id = f"{id}_cluster"
        )
        cluster_name_output = core.CfnOutput(
            scope=self,
            id="cluster-name-out",
            value=self.cluster.cluster_name,
            export_name=f"{id}-cluster-name"
        )

        service_names_output = core.CfnOutput(
            scope=self,
            id="service-names-out",
            value=service_name,
            export_name=f"{id}-service-names"
        )
        
        # Create a role for ECS to interact with AWS APIs with standard permissions
        self.ecs_exec_role = iam.Role(
            scope = self,
            id = "ecs_logstash-exec_role",
            assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies = ([
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy")
            ])
        )
        # Grant ECS additional permissions to decrypt secrets from Secrets Manager that have been encrypted with our custom key
        if getattr(ctx, "secrets_key_arn", None) is not None:
            self.ecs_exec_role.add_to_policy(
                iam.PolicyStatement(
                    actions = ["kms:Decrypt"],
                    effect = iam.Effect.ALLOW,
                    resources = [ctx.secrets_key_arn]
                ))
        # Grant ECS permissions to log to our log group
        self.log_group.grant_write(self.ecs_exec_role)

        # Create a task role to grant permissions for Logstash to interact with AWS APIs
        ecs_task_role = iam.Role(
            scope = self,
            id = f"{service_name}_task_role",
            assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com")
        )
        # Add permissions for Logstash to send metrics to CloudWatch
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                actions = ["cloudwatch:PutMetricData"],
                effect = iam.Effect.ALLOW,
                resources = ["*"]
            ))
        # Add permissions for Logstash to interact with our Kinesis queue
        self.kinesis_stream.grant_read(ecs_task_role)
        # Remove this when next version of kinesis module is released
        # https://github.com/aws/aws-cdk/pull/6141
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                actions = ["kinesis:ListShards"],
                effect = iam.Effect.ALLOW,
                resources = [self.kinesis_stream.stream_arn]
            ))
        # Add permissions for Logstash to store Kinesis Consumer Library (KCL) state tracking in DynamoDB
        state_table.grant_full_access(ecs_task_role)
        # Add permissions for Logstash to upload logs to S3 for archive
        bucket_resources = []
        for k, v in ctx_srv.variables.items():
            if k.endswith("_log_bucket"):
                bucket_resources.append('arn:aws:s3:::{0}'.format(v))
                bucket_resources.append('arn:aws:s3:::{0}/*'.format(v))
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    "s3:PutObject",
                    "s3:ListMultipartUploadParts",
                    "s3:ListBucket",
                    "s3:AbortMultipartUpload"
                    ],
                effect=iam.Effect.ALLOW,
                resources=bucket_resources
            ))

        # Task Definition
        task_definition = ecs.FargateTaskDefinition(
            scope = self,
            id = f"{service_name}_task_definition",
            cpu = ctx_srv.size.cpu,
            memory_limit_mib = ctx_srv.size.ram,
            execution_role = self.ecs_exec_role,
            task_role = ecs_task_role,
        )

        log_driver = ecs.LogDriver.aws_logs(
            log_group = self.log_group,
            stream_prefix = service_name)
        
        # Container Definition
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope = self,
            id = f"{service_name}_container_definition",
            task_definition = task_definition,
            image = ecs.ContainerImage.from_ecr_repository(self.ecr_repository, "latest"),
            logging = log_driver,
            **container_vars
        )

        # Service Definition
        security_group = ec2.SecurityGroup(
            scope = self,
            id = f"{service_name}_sg",
            vpc = self.vpc
        )

        service = ecs.FargateService(
            scope = self,
            id = f"{service_name}_fargate_service",
            task_definition = task_definition,
            cluster = self.cluster,
            desired_count = getattr(ctx_srv, "desired_count", ctx.default_desired_count),
            service_name = service_name,
            security_group = security_group
        )

        scaling = service.auto_scale_task_count(
            max_capacity = ctx_srv.scaling.max_capacity,
            min_capacity = ctx_srv.scaling.min_capacity
        )

        scaling.scale_on_cpu_utilization(
            id = "cpu_scaling",
            target_utilization_percent = ctx_srv.scaling.target_utilization_percent,
            scale_in_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_out_cooldown_seconds),
        )
Example #4
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        vpc: ec2.IVpc,
        kinesis_stream: kinesis.IStream,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        self._ecs_task_role = iam.Role(
            self,
            "ecsTaskRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description=
            "ECS Task Definition role to publish to Kinesis Data Stream",
        )

        api_credentials = secretsmanager.Secret.from_secret_complete_arn(
            self,
            "apiCredentials",
            secret_complete_arn=API_CREDENTIALS_ARN,
        )

        ecs_cluster = ecs.Cluster(self, "ecsCluster", vpc=vpc)

        ecs_task_definition = ecs.FargateTaskDefinition(
            self,
            "fargateTaskDefinition",
            task_role=self._ecs_task_role,
        )

        ecs.ContainerDefinition(
            self,
            "ecsContainer",
            image=ecs.ContainerImage.from_asset(
                directory="containers/crypto_app", ),
            secrets={
                "CRYPTO_COMPARE_API_KEY":
                ecs.Secret.from_secrets_manager(api_credentials,
                                                "CRYPTO_COMPARE_API_KEY"),
                # "TWITTER_API_KEY": ecs.Secret.from_secrets_manager(
                #     api_credentials, "TWITTER_API_KEY"
                # ),
                # "TWITTER_SECRET_API_KEY": ecs.Secret.from_secrets_manager(
                #     api_credentials, "TWITTER_SECRET_API_KEY"
                # ),
                # "TWITTER_BEARER_TOKEN": ecs.Secret.from_secrets_manager(
                #     api_credentials, "TWITTER_BEARER_TOKEN"
                # ),
                # "TWITTER_ACCESS_TOKEN": ecs.Secret.from_secrets_manager(
                #     api_credentials, "TWITTER_ACCESS_TOKEN"
                # ),
                # "TWITTER_SECRET_ACCESS_TOKEN": ecs.Secret.from_secrets_manager(
                #     api_credentials, "TWITTER_SECRET_ACCESS_TOKEN"
                # ),
            },
            environment={"KINESIS_STREAM_NAME": kinesis_stream.stream_name},
            task_definition=ecs_task_definition,
            logging=ecs.LogDriver.aws_logs(stream_prefix="ecs"),
        )

        ecs.FargateService(
            self,
            "fargateService",
            task_definition=ecs_task_definition,
            cluster=ecs_cluster,
        )

        kinesis_stream.grant_read_write(ecs_task_definition.task_role)
Example #5
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 stack_name: str,
                 task_definition_cpu: int,
                 task_definition_memory_limit_mib: int,
                 docker_image_name: str,
                 container_port: int,
                 desired_container_count: int,
                 private_subnets: Sequence[aws_ec2.Subnet] = None,
                 public_subnets: Sequence[aws_ec2.Subnet] = None,
                 private_security_group: aws_ec2.SecurityGroup = None,
                 public_security_group: aws_ec2.SecurityGroup = None,
                 vpc: aws_ec2.Vpc = None,
                 fargate_cluster: aws_ecs.Cluster = None,
                 authorizer_lambda_arn: str = None,
                 authorizer_lambda_role_arn: str = None,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        # Role
        self.role = aws_iam.Role(
            self,
            'Role',
            assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AmazonECSTaskExecutionRolePolicy')
            ],
            inline_policies={
                id:
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            'kms:Encrypt',
                            'kms:Decrypt',
                            'kms:ReEncrypt*',
                            'kms:GenerateDataKey*',
                            'kms:DescribeKey',
                            'ec2:CreateNetworkInterface',
                            'ec2:DescribeNetworkInterfaces',
                            'ec2:DeleteNetworkInterface',
                            # Remaining actions from https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                            'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                            'elasticloadbalancing:DeregisterTargets',
                            'elasticloadbalancing:Describe*',
                            'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                            'elasticloadbalancing:RegisterTargets',
                            'ec2:Describe*',
                            'ec2:AuthorizeSecurityGroupIngress'
                        ],
                        resources=['*'])
                ])
            })
        self.role.assume_role_policy.add_statements(
            aws_iam.PolicyStatement(
                actions=['sts:AssumeRole'],
                principals=[
                    aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
                ]))

        # Set Defaults if parameters are None
        if vpc is None:
            vpc = aws_ec2.Vpc(self, 'Vpc')

        if private_subnets is None:
            private_subnets = vpc.private_subnets

        if public_subnets is None:
            public_subnets = vpc.public_subnets

        if public_security_group is None:
            public_security_group = aws_ec2.SecurityGroup(
                self, 'PublicSecurityGroup', vpc=vpc, allow_all_outbound=True)
            # Allow inbound HTTP traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=80))
            # Allow inbound HTTPS traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=443))

        if private_security_group is None:
            private_security_group = aws_ec2.SecurityGroup(
                self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)

            public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
                public_subnets)

            # Create an ingress rule for each of the NLB's subnet's CIDR ranges and add the rules to the ECS service's
            # security group.  This will allow requests from the NLB to go into the ECS service.  This allow inbound
            # traffic from public subnets.
            for cidr_block in public_subnet_cidr_blocks:
                private_security_group.add_ingress_rule(
                    peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                    connection=aws_ec2.Port.tcp(port=container_port))

        if fargate_cluster is None:
            fargate_cluster = aws_ecs.Cluster(
                self,
                'FargateCluster',
            )

        task_def = aws_ecs.FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=task_definition_cpu,
            memory_limit_mib=task_definition_memory_limit_mib,
            task_role=self.role,
            execution_role=self.role)

        container = aws_ecs.ContainerDefinition(
            self,
            'Container',
            image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
            task_definition=task_def,
            logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
        container.add_port_mappings(
            aws_ecs.PortMapping(container_port=container_port,
                                protocol=aws_ec2.Protocol.TCP))

        ecs_service = aws_ecs.FargateService(
            self,
            'FargateService',
            cluster=fargate_cluster,
            task_definition=task_def,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
            security_group=private_security_group,
            desired_count=desired_container_count)

        target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
            self,
            'TargetGroup',
            port=80,  # Health check occurs over HTTP
            health_check=aws_elasticloadbalancingv2.HealthCheck(
                protocol=aws_elasticloadbalancingv2.Protocol.TCP),
            targets=[ecs_service],
            vpc=vpc)

        nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
            self,
            'NetworkLoadBalancer',
            vpc=vpc,
            internet_facing=False,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
        )
        nlb.add_listener(
            id='Listener',
            port=80,  # HTTP listener
            default_target_groups=[target_group])

        # nlb.log_access_logs(  # todo:  add this later when you have time to research the correct bucket policy.
        #     bucket=aws_s3.Bucket(
        #         self, 'LoadBalancerLogBucket',
        #         bucket_name='load-balancer-logs',
        #         public_read_access=False,
        #         block_public_access=aws_s3.BlockPublicAccess(
        #             block_public_policy=True,
        #             restrict_public_buckets=True
        #         )
        #     )
        # )

        # Dependencies
        ecs_service.node.add_dependency(nlb)

        # API Gateway
        rest_api = aws_apigateway.RestApi(self, stack_name)
        resource = rest_api.root.add_resource(
            path_part='{proxy+}',
            default_method_options=aws_apigateway.MethodOptions(
                request_parameters={'method.request.path.proxy': True}))

        token_authorizer = None
        if authorizer_lambda_arn and authorizer_lambda_role_arn:
            token_authorizer = aws_apigateway.TokenAuthorizer(  #todo: make this a parameter?
                self,
                'JwtTokenAuthorizer',
                results_cache_ttl=core.Duration.minutes(5),
                identity_source='method.request.header.Authorization',
                assume_role=aws_iam.Role.from_role_arn(
                    self,
                    'AuthorizerLambdaInvokationRole',
                    role_arn=authorizer_lambda_role_arn),
                handler=aws_lambda.Function.from_function_arn(
                    self,
                    'AuthorizerLambda',
                    function_arn=authorizer_lambda_arn))

        resource.add_method(
            http_method='ANY',
            authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
            authorizer=token_authorizer,
            integration=aws_apigateway.HttpIntegration(
                url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
                http_method='ANY',
                proxy=True,
                options=aws_apigateway.IntegrationOptions(
                    request_parameters={
                        'integration.request.path.proxy':
                        'method.request.path.proxy'
                    },
                    connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                    vpc_link=aws_apigateway.VpcLink(
                        self,
                        'VpcLink',
                        description=
                        f'API Gateway VPC Link to internal NLB for {stack_name}',
                        vpc_link_name=stack_name,
                        targets=[nlb]))))
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # EC2 Vpc construct

        vpc = ec2.Vpc(
            self,
            id="cromwell_server_vpc",
            max_azs=2
        )

        # ECS Cluster construct
        cluster = ecs.Cluster(
            self,
            id="cromwell_cluster",
            vpc=vpc
        )

        # IAM roles
        ecstaskexecutionrole = iam.Role.from_role_arn(
            self,
            "ecstaskexecutionrole",
            role_arn="arn:aws:iam::562965587442:role/ecsTaskExecutionRole"
        )

        batch_service_role = iam.Role.from_role_arn(
            self,
            "batchservicerole",
            role_arn="arn:aws:iam::562965587442:role/AWSBatchServiceRole"
        )

        fargate_cromwell_role = iam.Role.from_role_arn(
            self,
            "fargate_cromwell_role",
            role_arn="arn:aws:iam::562965587442:role/fargate_cromwell_role"
        )

        # Cromwell docker image from ECR
        container_img = ecr.Repository.from_repository_name(
            self,
            "cromwell_docker_image",
            repository_name=CROMWELL_REPOSITORY_NAME
        )

        # ECS task definition construct
        task_def = ecs.TaskDefinition(
            self,
            "cromwell_server_task",
            execution_role=ecstaskexecutionrole,
            task_role=fargate_cromwell_role,
            compatibility=ecs.Compatibility.FARGATE,
            cpu="1024",
            memory_mib="4096"
        )

        # ECS container definition construct
        container_def = ecs.ContainerDefinition(
            self,
            "cromwell_container",
            task_definition=task_def,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=container_img,
                tag=CROMWELL_IMAGE_TAG
            ),
            command=["bash", "run_cromwell_server.sh"],
            cpu=1,
            health_check=None,
            working_directory='/',
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="cromwell_logs",
                datetime_format=None,
                log_group=None,
                log_retention=None,
                multiline_pattern=None
            )
        )
        container_def.add_port_mappings(
            ecs.PortMapping(
                container_port=CROMWELL_PORT_NUMBER,
                host_port=CROMWELL_PORT_NUMBER,
                protocol=ecs.Protocol.TCP
            )
        )

        # EC2 Security Group construct
        security_group = ec2.SecurityGroup(
            self,
            "cromwell_server_security_group",
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name="cromwell_server_security_group",
            description="This is the security group assigned to the cromwell server running as a Fargate service.",
        )
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ecs.Protocol.TCP,
                                from_port=CROMWELL_PORT_NUMBER,
                                to_port=CROMWELL_PORT_NUMBER,
                                string_representation="cromwell_server_port"),
            remote_rule=None
        )

        # ECS Fargate Service construct
        service = ecs.FargateService(
            self,
            "cromwell_service",
            task_definition=task_def,
            cluster=cluster,
            service_name="cromwell_server_service",
            assign_public_ip=True,
            desired_count=1,
            security_group=security_group
        )


        # Batch resources
        # Reference:
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html
        # with open("lib/aws_batch_launch_template_user_data.txt", 'r') as user_data_file:
        #     user_data = user_data_file.read()
        #
        # ec2_user_data = ec2.UserData.custom(content=user_data)
        # ec2_instance =ec2.Instance(
        #     self,
        #     "ec2Instance",
        #     instance_type=ec2.InstanceType("t2.small"),
        #     machine_image=ec2.AmazonLinuxImage(),
        #     vpc=vpc,
        #     user_data=ec2_user_data
        # )

        # launch_template_data = core.CfnResource(
        #     self,
        #     "cromwell_launch_template_data",
        #     type="AWS::EC2::LaunchTemplate.LaunchTemplateData",
        #     properties={
        #         "UserData": user_data
        #     }
        # )
        #
        # launch_template = ec2.CfnLaunchTemplate(
        #     self,
        #     "cromwell_launch_template",
        #     launch_template_name="cromwell_launch_template",
        #     launch_template_data=launch_template_data,
        # )
        #
        # compute_resources = core.CfnResource(
        #     self,
        #     "cromwell_compute_resources",
        #     type="AWS::Batch::ComputeEnvironment.ComputeResources",
        #     properties={
        #       "DesiredvCpus": 256,
        #       "Ec2KeyPair": "genovic-qc-eddev",
        #       "InstanceRole": "arn:aws:iam::562965587442:role/ecsInstanceRole",
        #       "InstanceTypes": ["optimal"],
        #       "LaunchTemplate": launch_template.launch_template_name,
        #       "MaxvCpus": 256,
        #       "MinvCpus": 0,
        #       "SecurityGroupIds": [vpc.vpc_default_security_group],
        #       "Subnets": [subnet.subnet_id for subnet in vpc.public_subnets],
        #       "Tags": "cromwell_compute_resource",
        #       "Type": "EC2"
        #     }
        # )
        #
        # compute_env = batch.CfnComputeEnvironment(
        #     self,
        #     "cromwell_compute_env",
        #     service_role=batch_service_role,
        #     compute_environment_name="cromwell_compute_env",
        #     type="MANAGED",
        #     state="ENABLED",
        #     compute_resources=compute_resources
        # )
        #
        # queue = batch.CfnJobQueue(
        #     self,
        #     "cromwell_queue",
        #     compute_environment_order=compute_env,
        #     priority=1,
        #     job_queue_name="cromwell_queue",
        #     state="ENABLED"
        # )
        #
        # core.CfnOutput(
        #     self,
        #     "cromwell_queue_name",
        #     value=queue.job_queue_name
        # )

        core.CfnOutput(
            self,
            "FargateCromwellServiceArn",
            value=service.service_arn
        )
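The Cromwell example relies on module-level constants defined elsewhere in the project. Placeholder definitions that satisfy the API calls above (the values are assumptions for illustration; Cromwell's REST server defaults to port 8000):

    # Assumed module-level constants used by the Cromwell stack above
    CROMWELL_REPOSITORY_NAME = "cromwell"  # ECR repository name (assumption)
    CROMWELL_IMAGE_TAG = "latest"          # image tag (assumption)
    CROMWELL_PORT_NUMBER = 8000            # Cromwell's default server port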
Example #7
    def __init__(self,
                 scope: cdk.Construct,
                 id: str,
                 name: str,
                 vpc_name: str,
                 security_group_name: str,
                 secrets_path: str = "/ibc/paper/",
                 trading_mode: str = "paper",
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: Create Log Group

        # Create a cluster
        vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_name=vpc_name)

        privateSubnets = vpc.private_subnets

        cluster = ecs.Cluster(self, "cluster", vpc=vpc)
        # TODO: check for namespace before adding below.  This is failing on stack updates.
        cluster.add_default_cloud_map_namespace(name="private")

        task = ecs.FargateTaskDefinition(self,
                                         "task",
                                         cpu="512",
                                         memory_mi_b="1024")

        # Add SSM Permissions to IAM Role
        SSM_ACTIONS = ["ssm:GetParametersByPath", "kms:Decrypt"]
        SSM_RESOURCES = [
            "arn:aws:kms:*:*:alias/aws/ssm",
            "arn:aws:ssm:*:*:parameter{}*".format(secrets_path),
        ]
        ssmPolicy = iam.PolicyStatement(iam.PolicyStatementEffect.Allow)
        for action in SSM_ACTIONS:
            ssmPolicy.add_action(action)
        for resource in SSM_RESOURCES:
            ssmPolicy.add_resource(resource)
        task.add_to_task_role_policy(ssmPolicy)

        ibcRepo = ecr.Repository.from_repository_name(self, "container_repo",
                                                      "ibc")

        ibcImage = ecs.ContainerImage.from_ecr_repository(ibcRepo, "latest")

        # TODO: Add to Existing Hierarchal Logger, add log_group argument with ref to it
        ibcLogger = ecs.AwsLogDriver(self, "logger", stream_prefix=name)

        connectionLossMetric = logs.MetricFilter(
            self,
            "connectionLossMetric",
            filter_pattern=logs.FilterPattern.literal("ERROR ?110 ?130"),
            log_group=ibcLogger.log_group,
            metric_name="ib_connection_loss",
            metric_namespace=name,
        )

        newContainerMetric = logs.MetricFilter(
            self,
            "newContainerMetric",
            filter_pattern=logs.FilterPattern.literal(
                "Starting virtual X frame buffer"),
            log_group=ibcLogger.log_group,
            metric_name="new_container",
            metric_namespace=name,
        )

        kinesisFirehoseBucketActions = [
            "s3:AbortMultipartUpload",
            "s3:GetBucketLocation",
            "s3:GetObject",
            "s3:ListBucket",
            "s3:ListBucketMultipartUploads",
        ]

        kinesisFirehoseBucket = s3.Bucket(self, "firehoseBucket")

        kinesisFirehoseBucketPolicy = iam.PolicyStatement(
            iam.PolicyStatementEffect.Allow)
        for action in kinesisFirehoseBucketActions:
            kinesisFirehoseBucketPolicy.add_action(action)
        for resource in [
                kinesisFirehoseBucket.bucket_arn,
                kinesisFirehoseBucket.bucket_arn + "/*",
        ]:
            kinesisFirehoseBucketPolicy.add_resource(resource)

        kinesisFirehoseBucketRole = iam.Role(
            self,
            "kinesisFirehoseBucketRole",
            assumed_by=iam.ServicePrincipal("firehose.amazonaws.com"),
            path="/service/" + name + "/",
        )
        kinesisFirehoseBucketRole.add_to_policy(kinesisFirehoseBucketPolicy)

        kinesisFirehose = firehose.CfnDeliveryStream(
            self,
            "firehose",
            delivery_stream_name=name,
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                "bucketArn": kinesisFirehoseBucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 10 * 60,
                    "sizeInMBs": 16
                },
                "compressionFormat": "GZIP",
                "roleArn": kinesisFirehoseBucketRole.role_arn,
            },
        )

        # Add Firehose Permissions to Task IAM Role
        FIREHOSE_ACTIONS = ["firehose:PutRecord", "firehose:PutRecordBatch"]
        firehosePolicy = iam.PolicyStatement(iam.PolicyStatementEffect.Allow)
        for action in FIREHOSE_ACTIONS:
            firehosePolicy.add_action(action)
        firehosePolicy.add_resource(kinesisFirehose.delivery_stream_arn)
        task.add_to_task_role_policy(firehosePolicy)

        environment = {
            "SECRETS_PATH": secrets_path,
            "TWS_LIVE_PAPER": trading_mode,
            "FIREHOSE_STREAM_NAME": kinesisFirehose.delivery_stream_name,
        }

        ibcContainer = ecs.ContainerDefinition(
            self,
            "container",
            task_definition=task,
            image=ibcImage,
            environment=environment,
            logging=ibcLogger,
            essential=True,
        )

        securityGroup = ec2.SecurityGroup.from_security_group_id(
            self, "task_security_group", security_group_id=security_group_name)

        ibcService = ecs.FargateService(
            self,
            "fargate_service",
            cluster=cluster,
            task_definition=task,
            assign_public_ip=False,
            desired_count=1,
            security_group=securityGroup,
            service_discovery_options=ecs.ServiceDiscoveryOptions(name=name),
            service_name=name,
            vpc_subnets=privateSubnets,
        )
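Example #7 above (and Example #8 below) target a pre-1.0 CDK API: iam.PolicyStatement(iam.PolicyStatementEffect.Allow) with add_action/add_resource, string-valued cpu/memory_mi_b task definition parameters, and ecs.AwsLogDriver taking a scope. On a released CDK v1 API the SSM policy from Example #7 would be built in a single call; a sketch of the equivalent:

    # Equivalent of Example #7's SSM policy on the released CDK v1 API
    ssm_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["ssm:GetParametersByPath", "kms:Decrypt"],
        resources=[
            "arn:aws:kms:*:*:alias/aws/ssm",
            "arn:aws:ssm:*:*:parameter{}*".format(secrets_path),
        ])
    task.add_to_task_role_policy(ssm_policy)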
Example #8
    def configure_container(self, appname: str, props: Props, tgroups: {}):
        virtnodes = {}
        if appname == 'gateway' or appname == 'tcpecho':
            colors = ['']
        else:
            colors = props.colors

        for color in colors:
            fullname = color.upper()+appname

            td = ecs.FargateTaskDefinition(self, fullname+'_task', cpu='256', memory_mi_b='512',
                                           execution_role=props.taskexeciamrole, task_role=props.taskiamrole)

            env = {}
            if appname != 'tcpecho':
                td.node.find_child('Resource').add_property_override('proxyConfiguration', {
                    'type': 'APPMESH',
                    'containerName': 'envoy',
                    'proxyConfigurationProperties': [
                        {'name': 'IgnoredUID',
                         'value': '1337'},
                        {'name': 'ProxyIngressPort',
                         'value': '15000'},
                        {'name': 'ProxyEgressPort',
                         'value': '15001'},
                        {'name': 'AppPorts',
                         'value': '9080'},
                        {'name': 'EgressIgnoredIPs',
                         'value': '169.254.170.2,169.254.169.254'}
                    ]
                })

                env = {
                    'SERVER_PORT': '9080'
                }

            if appname != 'tcpecho':
                contimage = ecs.EcrImage.from_ecr_repository(props.repos[appname], tag='latest')
            else:
                contimage = ecs.ContainerImage.from_registry('cjimti/go-echo')

            port = 9080
            if appname == 'gateway':
                env['COLOR_TELLER_ENDPOINT'] = props.repos['colorteller'].repository_name +\
                                               '.'+props.cluster.default_namespace.namespace_name+':9080'
                env['TCP_ECHO_ENDPOINT'] = 'tcpecho.'+props.cluster.default_namespace.namespace_name+':2701'
            elif appname == 'colorteller':
                env['COLOR'] = color
            else:
                env = {'TCP_PORT': '2701', 'NODE_NAME': 'mesh/' + props.mesh.mesh_name + '/virtualNode/tcpecho--vn'}
                port = 2701

            cont = ecs.ContainerDefinition(self, fullname+'-container', task_definition=td, essential=True,
                                           logging=ecs.AwsLogDriver(self, fullname+'-logs', stream_prefix=fullname),
                                           image=contimage, environment=env)
            #cont.add_port_mappings(container_port=port, host_port=port, protocol=ecs.Protocol.Tcp)

            # X-Ray and Envoy definition ----------------------------------------------------------------------------
            if appname != 'tcpecho':
                xrayimage = ecs.ContainerImage.from_registry('amazon/aws-xray-daemon')

                xtask = td.add_container('xray-daemon', image=xrayimage, cpu=32, memory_reservation_mi_b=256,
                                         logging=ecs.AwsLogDriver(self, fullname+'-xray-logs',
                                                                  stream_prefix=fullname+'-xray'),
                                         essential=True, user='******')
                xtask.add_port_mappings(container_port=2000, host_port=2000, protocol=ecs.Protocol.Udp)

                # Envoy definition ----------------------------------------------------------------------------------
                ENVOY_IMAGE_LOC = '111345817488.dkr.ecr.us-west-2.amazonaws.com/aws-appmesh-envoy:v1.9.1.0-prod'
                envoyimage = ecs.EcrImage.from_registry(ENVOY_IMAGE_LOC)

                envoyenv = {
                    'APPMESH_VIRTUAL_NODE_NAME': 'mesh/'+props.mesh.mesh_name+'/virtualNode/'+appname+'-'+color+'-vn',
                    'ENABLE_ENVOY_XRAY_TRACING': '1',
                    'ENABLE_ENVOY_STATS_TAGS': '1',
                    'ENVOY_LOG_LEVEL': 'debug'
                }

                if appname == 'gateway':
                    envoyenv['APPMESH_VIRTUAL_NODE_NAME'] = 'mesh/'+props.mesh.mesh_name+'/virtualNode/gateway--vn'

                envoy_hc = ecs.HealthCheck()
                envoy_hc['command'] = ['CMD-SHELL',
                                       'curl -s http://localhost:9901/server_info | grep state | grep -q LIVE']
                envoy_hc['interval'] = 5
                envoy_hc['timeout'] = 2
                envoy_hc['retries'] = 3

                etask = td.add_container('envoy', image=envoyimage, user='******', essential=True, environment=envoyenv,
                                         logging=ecs.AwsLogDriver(self, fullname+'-envoy-logs',
                                                                  stream_prefix=fullname+'-envoy'),
                                         health_check=envoy_hc)
                etask.add_port_mappings(container_port=9901, host_port=9901, protocol=ecs.Protocol.Tcp)
                etask.add_port_mappings(container_port=15000, host_port=15000, protocol=ecs.Protocol.Tcp)
                etask.add_port_mappings(container_port=15001, host_port=15001, protocol=ecs.Protocol.Tcp)

            # Prometheus & Grafana definition for Gateway ---------------------------------------------------------
            if appname == 'gateway':
                prometheusimage = ecs.EcrImage.from_ecr_repository(props.repos['prometheus'], tag='latest')

                ptask = td.add_container('prometheus', image=prometheusimage, essential=True,
                                         logging=ecs.AwsLogDriver(self, appname + '-prometheus-logs',
                                                                  stream_prefix=appname + '-prometheus'))
                ptask.add_port_mappings(container_port=9090, host_port=9090)

                grafanaimage = ecs.ContainerImage.from_registry('grafana/grafana:latest')
                gtask = td.add_container('grafana', image=grafanaimage, essential=True,
                                         logging=ecs.AwsLogDriver(self, appname + '-grafana-logs',
                                                                  stream_prefix=appname + '-grafana'))
                gtask.add_port_mappings(container_port=3000, host_port=3000)

            disco = ecs.ServiceDiscoveryOptions()
            disco['dnsRecordType'] = sdisc.DnsRecordType.A
            disco['dnsTtlSec'] = 3000
            if color == 'white':
                disco['name'] = 'colorteller'
            elif appname != 'gateway' and appname != 'tcpecho':
                disco['name'] = 'colorteller-'+color
            elif appname == 'gateway':
                disco['name'] = 'colorgateway'
            else:
                disco['name'] = 'tcpecho'

            svc = ecs.FargateService(self, fullname+'Service', maximum_percent=200, minimum_healthy_percent=100,
                                     desired_count=1, task_definition=td, cluster=props.cluster,
                                     vpc_subnets=props.vpc.private_subnets, security_group=props.csg,
                                     service_discovery_options=disco)

            if appname == 'gateway':
                svc._load_balancers = [{'containerName': 'grafana', 'containerPort': 3000,
                                        'targetGroupArn': tgroups['grafana'].target_group_arn}]

            path = '/ping' if appname != 'tcpecho' else '/'
            spec = {
                'listeners': [{
                    'portMapping': {'port': port, 'protocol': 'http'},
                    'healthCheck': {'protocol': 'http', 'path': path,
                                    'healthyThreshold': 2, 'unhealthyThreshold': 2,
                                    'timeoutMillis': 2000, 'intervalMillis': 5000}}],
                'serviceDiscovery': {
                    'dns': {'hostname': svc._cloudmap_service.service_name+'.'+
                                        props.cluster.default_namespace.namespace_name}
                }
            }

            if appname == 'gateway':
                spec['backends'] = [
                    {'virtualService': {'virtualServiceName':
                                            'colorteller'+'.'+props.cluster.default_namespace.namespace_name}},
                    {'virtualService': {'virtualServiceName':
                                            'tcpecho' + '.' + props.cluster.default_namespace.namespace_name}},
                ]

            # Create AppMesh virtual nodes ------------------------------------------------------------------------
            vn = appmesh.CfnVirtualNode(self, fullname + 'VirtualNode', mesh_name=props.mesh.mesh_name,
                                        virtual_node_name=appname + '-' + color + '-vn',
                                        spec=spec)

            virtnodes[fullname] = vn

        return virtnodes
Example #9
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        #############################################
        #Import resource and custom setting part start
        #############################################
        #cn-north-1
        impRes={
                 "vpc":"vpc-0883083ff3a10c1ec",
                 "SvcSG":"sg-04d3b60e954c1c1ef",
                 "ALBSG":"sg-0b6d093d52d48bba9",
                 "ALBInternet":True,
                 "taskRole":"arn:aws-cn:iam::627484392488:role/ecsTaskExecutionRole",
                 "AlbSubnet":[
                       {"subnetId":"subnet-0d16fa0c969f234d3",
                        "routeTabId":"rtb-074c6b532f3030ad6"},
                       {"subnetId":"subnet-0f28a97c04d3b11cd",
                        "routeTabId":"rtb-074c6b532f3030ad6"}
                 ],
                 #"SvcSubNet":[{"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-074c6b532f3030ad6"}]
                 "SvcSubNet":[{"subnetId":"subnet-0f28a97c04d3b11cd","routeTabId":"rtb-0587cc522717461cd"},
                              {"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-0587cc522717461cd"}]
               }
        newRes={
                 "TG":{"HealthPath":"/test.html","Port":80,"containPort":80},
                 "Listener":{"Port":80},
                 "TaskFamily":"tsFargate",
                 "ImageAsset1":{"DockfilePath":"httpd-ssh",
                                "BuildArgs":{"HTTP_PROXY":"http://YOUR_PROXY_SERVER:80"}
                               }
               }

        MyTaskDefinition=[{"Cpu":512,"MemLimitMib":1024}]
        MyContainerDefinition=[
             {"containerName":"MyContainer1",
              "cpu":256,
              "essential":True,
              "portMappings":[ecs.PortMapping(container_port=80,host_port=80)], #"portMappings":[ecs.PortMapping(container_port=80,host_port=80),ecs.PortMapping(container_port=22,host_port=22)],
              "environment":{"SSH_PUBLIC_KEY":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/alWrS+HH5KkPbso+Tsy+Z0WGTX5wvXvon5OacLMyOU3gj2mbbIifasXf/RadpuywuyW3uFirtRlPmSb5Q0PVLODku503Xettw+u6/Z22VV7F2ACgg4iHaCo2SR4L8saUrLLfcKXKr/WCn3w7uYcqGsXEcSFCCSZgn4BoZJqP4Q=="},
              "LogMountPoint":["/usr/local/apache2/logs"]
             }
        ]
        MySvc={"AssignPubIp":True, "desiredCount":1}
        #############################################
        #Import resource and custom setting part end
        #############################################

        #if you import an external resource you cannot set a destroy policy on it
        #import VPC, Private Subnet, SG
        vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=impRes["vpc"])	
        
        #import SG
        mysvcsg = ec2.SecurityGroup.from_security_group_id(self, "svcsg", 
                impRes["SvcSG"],
                mutable=False)
        
        #import Role
        taskRole = iam.Role.from_role_arn(self, "TaskRole",impRes["taskRole"])
        
        #create ALB        
        mytargetGrp = elbv2.ApplicationTargetGroup(self, "targetGrp",
                target_type=elbv2.TargetType.IP,
                port=newRes["TG"]["Port"],
                vpc=vpc,
                health_check=elbv2.HealthCheck(path=newRes["TG"]["HealthPath"]))
        #target group cannot use .apply_removal_policy directly
        cfn_mytargetGrp=mytargetGrp.node.find_child("Resource")
        cfn_mytargetGrp.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
		
        #import public subnet for alb
        albsubnets = [
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid1',
                    subnet_id = impRes["AlbSubnet"][0]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][0]["routeTabId"]
                ),
                ec2.Subnet.from_subnet_attributes(self,'albsubnetid2',
                    subnet_id = impRes["AlbSubnet"][1]["subnetId"], 
                    route_table_id=impRes["AlbSubnet"][1]["routeTabId"]
                )
        ]		
        vpc_subnets_selection = ec2.SubnetSelection(subnets=albsubnets)
        #create new ALB
        myalb = elbv2.ApplicationLoadBalancer(self, "ALBv2",
                vpc=vpc,
                security_group=ec2.SecurityGroup.from_security_group_id(self, "ALBSG", impRes["ALBSG"],mutable=False),
                internet_facing=impRes["ALBInternet"],
                vpc_subnets=vpc_subnets_selection)
        myalb.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        #create new ALB listener
        myalblistener = elbv2.ApplicationListener(self, "ALBlistenter", 
                load_balancer=myalb, 
                port=newRes["Listener"]["Port"])
        myalblistener.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        myalblistener.add_target_groups("albaddtg", target_groups=[mytargetGrp])
        
        
        #create new ECS Cluster
        mycluster = ecs.Cluster(self, "cluster", vpc=vpc)
        mycluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
        
        fargatetaskDefinition = ecs.FargateTaskDefinition(self, "fargatetaskDefinition",
                cpu=MyTaskDefinition[0]["Cpu"],
                memory_limit_mib=MyTaskDefinition[0]["MemLimitMib"],
                execution_role=taskRole,
                family=newRes["TaskFamily"],
                task_role=taskRole)
                #volumes=myEfsVols)      
        fargatetaskDefinition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)


        #define docker image asset
        dirname = os.path.dirname(__file__)
        #for container 1, normally httpd
        #create image asset; the image is built locally and then pushed to ECR
        asset1 = DockerImageAsset(self, "ImageAsset1",
                   directory=os.path.join(dirname, "../..", newRes["ImageAsset1"]["DockfilePath"]),
                   build_args=newRes["ImageAsset1"]["BuildArgs"]
                )
         
        #create container definition for task definition
        MyContainer1def = ecs.ContainerDefinition(self, "MyContainer1def",
                task_definition=fargatetaskDefinition,
                linux_parameters=ecs.LinuxParameters(self,"LinuxPara1",init_process_enabled=True),
                image=ecs.ContainerImage.from_ecr_repository(asset1.repository, asset1.image_uri.rpartition(":")[-1]),
                container_name=MyContainerDefinition[0]["containerName"],
                essential=MyContainerDefinition[0]["essential"],
                port_mappings=MyContainerDefinition[0]["portMappings"],
                environment=MyContainerDefinition[0]["environment"]
        )
	
        #import service private subnets
        mysvcprivateSNs = [
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN1',
                    subnet_id = impRes["SvcSubNet"][0]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][0]["routeTabId"]),
                ec2.Subnet.from_subnet_attributes(self,'svcprivateSN2',
                    subnet_id = impRes["SvcSubNet"][1]["subnetId"], 
                    route_table_id=impRes["SvcSubNet"][1]["routeTabId"])
        ]

        #create service
        myservice=ecs.FargateService(self,"service",
                task_definition=fargatetaskDefinition,
                assign_public_ip=MySvc["AssignPubIp"],
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,
                vpc_subnets=ec2.SubnetSelection(subnets=mysvcprivateSNs),
                security_group=mysvcsg,
                cluster=mycluster,
                desired_count=MySvc["desiredCount"])
        
        mytargetGrp.add_target(myservice.load_balancer_target(container_name="MyContainer1",container_port=newRes["TG"]["containPort"], protocol=ecs.Protocol.TCP))
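Example #9 uses several module aliases that the snippet does not show. Judging from the names used, the imports are probably along these lines (inferred from usage, not copied from the project):

    import os
    from aws_cdk import core as cdk
    from aws_cdk import aws_ec2 as ec2
    from aws_cdk import aws_ecs as ecs
    from aws_cdk import aws_iam as iam
    from aws_cdk import aws_elasticloadbalancingv2 as elbv2
    from aws_cdk.aws_ecr_assets import DockerImageAsset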
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        cluster = ecs.Cluster(
            self, "EC2Cluster",
            vpc=vpc
        )

        security_group = ec2.SecurityGroup(
            self, "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
        )

        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_tcp(),
            description="Allow all traffic"
        )

        app_target_group = elbv2.ApplicationTargetGroup(
            self, "AppTargetGroup",
            port=http_port,
            vpc=vpc,
            target_type=elbv2.TargetType.IP,
        )

        elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group,
        )

        app_listener = elbv2.ApplicationListener(
            self, "AppListener",
            load_balancer=elastic_loadbalancer,
            port=http_port,
            default_target_groups=[app_target_group],
        )

        task_definition = ecs.TaskDefinition(
            self, "TaskDefenition",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=task_def_cpu,
            memory_mib=task_def_memory_mb,
        )

        container_definition = ecs.ContainerDefinition(
            self, "ContainerDefinition",
            image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
            task_definition=task_definition,
            logging=ecs.AwsLogDriver(
                stream_prefix="DemoContainerLogs",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )

        container_definition.add_port_mappings(
            ecs.PortMapping(
                container_port=http_port,
            )
        )

        fargate_service = ecs.FargateService(
            self, "FargateService",
            task_definition=task_definition,
            cluster=cluster,
            security_group=security_group,
        )

        fargate_service.attach_to_application_target_group(
            target_group=app_target_group,
        )

        core.CfnOutput(
        self, "LoadBalancerDNS",
        value=elastic_loadbalancer.load_balancer_dns_name
        )
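Example #10 references http_port, task_def_cpu and task_def_memory_mb without defining them, so they are presumably module-level constants. Values like the following would satisfy the API (ecs.TaskDefinition expects cpu and memory_mib as strings; the exact numbers are assumptions):

    # Assumed module-level constants for Example #10
    http_port = 80               # the vulnerables/web-dvwa container listens on 80
    task_def_cpu = "512"
    task_def_memory_mb = "1024"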
Example #11
    def __create_nlb_service(self, service_name: str, ctx: object):
        ctx_srv = getattr(ctx.inbound.services.nlb, service_name)

        ecs_task_role = self.__create_default_task_role(service_name)

        log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                            stream_prefix=service_name)

        # create a Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task_definition",
            cpu=ctx_srv.size.cpu,
            memory_limit_mib=ctx_srv.size.ram,
            execution_role=self.ecs_exec_role,
            task_role=ecs_task_role,
        )

        # create a container definition and associate with the Fargate task
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope=self,
            id=f"{service_name}_container_definition",
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                self.ecr_repository, "latest"),
            logging=log_driver,
            **container_vars)
        security_group = ec2.SecurityGroup(scope=self,
                                           id=f"{service_name}_sg",
                                           vpc=self.vpc)
        service = ecs.FargateService(
            scope=self,
            id=f"{service_name}_service",
            task_definition=task_definition,
            cluster=self.cluster,
            desired_count=getattr(ctx_srv, "desired_count",
                                  ctx.default_desired_count),
            service_name=service_name,
            security_group=security_group,
            health_check_grace_period=core.Duration.minutes(10))

        # map ports on the container
        for port in ctx_srv.ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.TCP))
            # add a listener to network load balancer
            listener = self.load_balancer.add_listener(
                id=f"{service_name}_{port}", port=port)

            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.tcp(port),
                f"Logstash ingress for {service_name}")

            target = (service).load_balancer_target(
                container_name=container.container_name, container_port=port)

            listener.add_targets(id=f"{service_name}_{port}_tg",
                                 port=port,
                                 targets=[target])

        scaling = service.auto_scale_task_count(
            max_capacity=ctx_srv.scaling.max_capacity,
            min_capacity=ctx_srv.scaling.min_capacity)

        scaling.scale_on_cpu_utilization(
            id="cpu_scaling",
            target_utilization_percent=ctx_srv.scaling.
            target_utilization_percent,
            scale_in_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_out_cooldown_seconds))
Example #12
    def __create_cloudmap_service(self, service_name: str, ctx: object):
        ctx_srv = getattr(ctx.inbound.services.cloudmap, service_name)

        ecs_task_role = self.__create_default_task_role(service_name)

        log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                            stream_prefix=service_name)

        # create a Fargate task definition
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=f"{service_name}_task_definition",
            cpu=ctx_srv.size.cpu,
            memory_limit_mib=ctx_srv.size.ram,
            execution_role=self.ecs_exec_role,
            task_role=ecs_task_role,
        )

        # create a container definition and associate with the Fargate task
        container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
        container = ecs.ContainerDefinition(
            scope=self,
            id=f"{service_name}_container_definition",
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                self.ecr_repository, "latest"),
            logging=log_driver,
            **container_vars)
        security_group = ec2.SecurityGroup(scope=self,
                                           id=f"{service_name}_sg",
                                           vpc=self.vpc)
        service = ecs.FargateService(scope=self,
                                     id=f"{service_name}_service",
                                     task_definition=task_definition,
                                     cluster=self.cluster,
                                     desired_count=getattr(
                                         ctx_srv, "desired_count",
                                         ctx.default_desired_count),
                                     service_name=service_name,
                                     security_group=security_group)

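        # map each TCP port on the container and allow ingress from the configured CIDR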
        for port in ctx_srv.ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.TCP))
            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.tcp(port),
                f"Logstash ingress for {service_name}")

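        # likewise for any UDP ports the service exposes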
        for port in ctx_srv.udp_ports:
            container.add_port_mappings(
                ecs.PortMapping(container_port=port,
                                host_port=port,
                                protocol=ecs.Protocol.UDP))
            security_group.add_ingress_rule(
                ec2.Peer.ipv4(ctx.ingress_cidr), ec2.Port.udp(port),
                f"Logstash ingress for {service_name}")

        scaling = service.auto_scale_task_count(
            max_capacity=ctx_srv.scaling.max_capacity,
            min_capacity=ctx_srv.scaling.min_capacity)

        scaling.scale_on_cpu_utilization(
            id="cpu_scaling",
            target_utilization_percent=ctx_srv.scaling.
            target_utilization_percent,
            scale_in_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_in_cooldown_seconds),
            scale_out_cooldown=core.Duration.seconds(
                ctx_srv.scaling.scale_out_cooldown_seconds))

        cloudmap = awssd.PublicDnsNamespace.from_public_dns_namespace_attributes(
            scope=self,
            id=f"cloudmap_namespace",
            **ctx.inbound.namespace_props.dict())

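        # register the service in the imported Cloud Map namespace so it can be
        # discovered via DNS A records with a 15-second TTL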
        service.enable_cloud_map(cloud_map_namespace=cloudmap,
                                 dns_record_type=awssd.DnsRecordType.A,
                                 dns_ttl=core.Duration.seconds(15))
Example #13
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, repository: ecr.Repository, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        namespace = servicediscovery.PrivateDnsNamespace(
            scope=self,
            id="PRIVATE-DNS",
            vpc=vpc,
            name="private",
            description="a private dns"
        )

        sg = ec2.SecurityGroup(
            scope=self,
            id="SG",
            vpc=vpc,
            allow_all_outbound=True,
            description="open 9200 and 9300 ports",
            security_group_name="es-group"
        )
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9200),
        )
        sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port=9300),
        )

        #####################################################
        elastic_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="ES-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        elastic = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=30),
            task_definition=elastic_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                # "discovery.zen.ping.unicast.hosts": "elasticsearch",
                "node.name": constants.ES_CONTAINER_NAME,
                "node.master": "true",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.AwsLogDriver(
                stream_prefix="ES",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )
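        # Elasticsearch's bootstrap checks require a high open-file limit and an
        # unlimited memlock when bootstrap.memory_lock is enabled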
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65535, soft_limit=65535))
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))

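        # expose the REST API (9200) and transport (9300) ports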
        elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
        elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

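        # mount the host-backed "esdata" volume so index data persists on the instance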
        elastic.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))
        # elastic.add_volumes_from(ecs.VolumeFrom(
        #     source_container="esdata",
        #     read_only=False,
        #     ))

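        # run a single-task EC2 service for the Elasticsearch master node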
        es_service = ecs.Ec2Service(
            scope=self,
            id="ES-SERVICE",
            cluster=cluster,
            task_definition=elastic_task_def,
            desired_count=1,
            service_name="ES",
            security_group=sg,
        )

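        # expose the master node through an internet-facing ALB on port 80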
        es_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="ES-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        es_listener = es_lb.add_listener(
            id="ES-LISTENER",
            port=80,
        )
        es_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="ES-GRP",
                container_name=elastic.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=es_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))

        service = es_service.enable_cloud_map(
            cloud_map_namespace=namespace,
            dns_record_type=servicediscovery.DnsRecordType.A,
            # dns_ttl=core.Duration.seconds(amount=30),
            failure_threshold=1,
            name="elastic",
        )

        core.CfnOutput(
            scope=self,
            id="DNS-ES",
            value=es_lb.load_balancer_dns_name,
        )

        #####################################################

        node_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="NODE-TASK-DEF",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[ecs.Volume(
                name="esdata",
                host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
            )],
        )

        node = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_NODE_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=40),
            task_definition=node_task_def,
            memory_limit_mib=4500,
            essential=True,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=repository, tag='latest'),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                "discovery.zen.ping.unicast.hosts": "elastic.private",
                "node.name": constants.ES_NODE_CONTAINER_NAME,
                "node.master": "false",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="NODE",
                log_retention=logs.RetentionDays.ONE_DAY,
            ))

        node.add_port_mappings(ecs.PortMapping(container_port=9200))
        node.add_port_mappings(ecs.PortMapping(container_port=9300))

        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65536, soft_limit=65536))
        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))
        node.add_mount_points(ecs.MountPoint(
            container_path="/usr/share/elasticsearch/data",
            source_volume="esdata",
            read_only=False,
        ))

        node_service = ecs.Ec2Service(
            scope=self,
            id="ES-NODE-SERVICE",
            cluster=cluster,
            task_definition=node_task_def,
            desired_count=1,
            service_name="NODE",
            security_group=sg,
        )

        node_lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="NODE-ELB",
            vpc=vpc,
            internet_facing=True,
        )
        node_listener = node_lb.add_listener(
            id="NODE-LISTENER",
            port=80,
        )
        node_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="NODE-GRP",
                container_name=node.container_name,
                listener=ecs.ListenerConfig.application_listener(
                    listener=node_listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))
        core.CfnOutput(
            scope=self,
            id="DNS-NODE",
            value=node_lb.load_balancer_dns_name,
        )
Example #14
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        elastic_cluster_task_def = ecs.Ec2TaskDefinition(
            scope=self,
            id="ES-TASK-DEF",
            network_mode=ecs.NetworkMode.BRIDGE,
        )

        elastic = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=30),
            task_definition=elastic_cluster_task_def,
            memory_limit_mib=4024,
            essential=True,
            image=ecs.ContainerImage.from_registry(
                name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                # "discovery.zen.ping.unicast.hosts": "elasticsearch",
                "node.name": constants.ES_CONTAINER_NAME,
                "node.master": "true",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
            },
            logging=ecs.AwsLogDriver(
                stream_prefix="ES",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
        )
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65535, soft_limit=65535))
        elastic.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))

        elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
        elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

        #####################################################
        node = ecs.ContainerDefinition(
            scope=self,
            id=constants.ES_NODE_CONTAINER_NAME,
            start_timeout=core.Duration.seconds(amount=40),
            task_definition=elastic_cluster_task_def,
            memory_limit_mib=4024,
            essential=True,
            image=ecs.ContainerImage.from_registry(
                name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
            environment={
                "cluster.name": constants.ES_CLUSTER_NAME,
                "bootstrap.memory_lock": "true",
                "discovery.zen.ping.unicast.hosts": constants.ES_CONTAINER_NAME,
                "node.name": constants.ES_NODE_CONTAINER_NAME,
                "node.master": "false",
                "node.data": "true",
                "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="NODE",
                log_retention=logs.RetentionDays.ONE_DAY,
            ))

        node.add_port_mappings(ecs.PortMapping(container_port=9200))
        node.add_port_mappings(ecs.PortMapping(container_port=9300))

        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.NOFILE, hard_limit=65536, soft_limit=65536))
        node.add_ulimits(ecs.Ulimit(
            name=ecs.UlimitName.MEMLOCK, hard_limit=-1, soft_limit=-1))
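        # container links require BRIDGE networking; the data node reaches the
        # master container through this alias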
        node.add_link(container=elastic, alias=constants.ES_CONTAINER_NAME)

        #####################################################

        ecs_service = ecs.Ec2Service(
            scope=self,
            id="ES-SERVICE",
            cluster=cluster,
            task_definition=elastic_cluster_task_def,
            desired_count=1,
            service_name=constants.ECS_ES_SERVICE,
        )

        lb = elbv2.ApplicationLoadBalancer(
            scope=self,
            id="ELB",
            vpc=vpc,
            internet_facing=True,
        )
        listener = lb.add_listener(
            id="LISTENER",
            port=80,
        )
        ecs_service.register_load_balancer_targets(
            ecs.EcsTarget(
                new_target_group_id="TARGET-GRP",
                container_name=elastic.container_name,
                # container_port=9200,
                listener=ecs.ListenerConfig.application_listener(
                    listener=listener,
                    protocol=elbv2.ApplicationProtocol.HTTP),
            ))

        core.CfnOutput(
            scope=self,
            id="DNS-NAME",
            value=lb.load_balancer_dns_name,
        )
Example #15
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        public_subnets = ec2.SubnetConfiguration(
            name="Public", subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24)

        private_subnets = ec2.SubnetConfiguration(
            name="Tier2",
            subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/22',
                      subnet_configuration=[public_subnets, private_subnets])

        fargate_service_security_group = ec2.SecurityGroup(
            self,
            'fargate_service_security_group',
            description='Security Group for the Fargate Service',
            allow_all_outbound=True,
            vpc=vpc)

        # create ECS Cluster
        ecs_cluster = ecs.Cluster(self,
                                  'AWS-Cookbook-EcsCluster',
                                  cluster_name='awscookbook207',
                                  vpc=vpc)

        FargateTask = ecs.FargateTaskDefinition(
            self,
            'FargateTask',
            cpu=256,
            memory_limit_mib=512,
        )

        ContainerDef = ecs.ContainerDefinition(
            self,
            'ContainerDef',
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            task_definition=FargateTask,
        )

        ContainerDef.add_port_mappings(ecs.PortMapping(container_port=80))

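        # run the tasks in the private subnets; with assign_public_ip=False they
        # reach the internet only through the NAT gateway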
        ecs.FargateService(
            self,
            'awscookbook207Service',
            cluster=ecs_cluster,
            task_definition=FargateTask,
            assign_public_ip=False,
            desired_count=1,
            enable_ecs_managed_tags=False,
            # health_check_grace_period=core.Duration.seconds(60),
            max_healthy_percent=100,
            min_healthy_percent=0,
            platform_version=ecs.FargatePlatformVersion.LATEST,
            security_groups=[fargate_service_security_group],
            service_name='awscookbook207Service',
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT))

        # outputs

        CfnOutput(self, 'VpcId', value=vpc.vpc_id)

        CfnOutput(self, 'EcsClusterName', value=ecs_cluster.cluster_name)

        public_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC)

        CfnOutput(self,
                  'VpcPublicSubnets',
                  value=', '.join(map(str, public_subnets.subnet_ids)))

        CfnOutput(self,
                  'AppSgId',
                  value=fargate_service_security_group.security_group_id)

        CfnOutput(self,
                  'ContainerIp',
                  value=fargate_service_security_group.security_group_id)