Example #1
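# Note: like the other snippets on this page, this example assumes the usual CDK v1
# imports, e.g. from aws_cdk import core, aws_ec2 as ec2, aws_autoscaling as autoscaling,
# aws_elasticloadbalancing as elb, aws_elasticloadbalancingv2 as elbv2.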
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        vpc = ec2.Vpc(self, "VPC")

        # Security group for our test instance
        my_sg = ec2.SecurityGroup(self,
                                  "my_sg",
                                  vpc=vpc,
                                  description="My sg for testing",
                                  allow_all_outbound=True)
        # Add ssh from anywhere
        my_sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                               "Allow ssh access from anywhere")

        asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(),
        )
        asg.add_security_group(my_sg)  # add our security group (expects a SecurityGroup object)

        ## Classic Elastic Load Balancer
        #lb = elb.LoadBalancer(
        #    self, "ELB",
        #    vpc=vpc,
        #    internet_facing=True,
        #    health_check={"port": 22}
        #)
        #lb.add_target(asg)
        #
        #listener = lb.add_listener(
        #    external_port=8000,
        #    external_protocol=elb.LoadBalancingProtocol.TCP,
        #    internal_port=22,
        #    internal_protocol=elb.LoadBalancingProtocol.TCP
        #)
        #listener.connections.allow_default_port_from_any_ipv4("Open to the world")

        # Network Load Balancer
        nlb = elbv2.NetworkLoadBalancer(
            self,
            "NLB",
            vpc=vpc,
            internet_facing=True,
            cross_zone_enabled=True,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

        my_target = elbv2.NetworkTargetGroup(self,
                                             "MyTargetGroup",
                                             port=22,
                                             vpc=vpc)

        listener = nlb.add_listener("Listener",
                                    port=8000,
                                    default_target_groups=[my_target])
        my_target.add_target(asg)
Example #2
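# Note: judging by the classes used (NetworkLoadBalancer, NetworkTargetGroup, Protocol),
# the alias elb in this snippet presumably refers to aws_elasticloadbalancingv2.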
    def __init__(self, scope: core.Construct, construct_id: str, vpc,
                 nlb_listener_port, nlb_name, nlb_id, internet_facing,
                 targetgroup_port, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #network load balancer
        nlb = elb.NetworkLoadBalancer(
            self,
            internet_facing=internet_facing,
            load_balancer_name=nlb_name,
            id=nlb_id,
            vpc=vpc,  # the VPC object passed in from above (obtained via lookup)
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets))

        # load balancer security group
        sg_nlb = ec2.SecurityGroup(self,
                                   id="sg_nlb",
                                   vpc=vpc,
                                   security_group_name="sg_nlb")

        #listener
        listener = nlb.add_listener("Listener",
                                    port=nlb_listener_port,
                                    protocol=elb.Protocol.TCP)
        target_group = elb.NetworkTargetGroup(self,
                                              vpc=vpc,
                                              id="Target",
                                              port=targetgroup_port)
        listener.add_target_groups("TargetGroup", target_group)

        #sg_nlb ingress
        sg_nlb.add_ingress_rule(peer=ec2.Peer.ipv4("0.0.0.0/0"),
                                connection=ec2.Port.tcp(22))
        core.Tags.of(self).add("Owner", "Wahaj-nlb")
Example #3
 def create_network_load_balancer(self):
     """
     Create Network Load Balancer
     """
     return elb.NetworkLoadBalancer(
         self,
         "nlb",
         vpc=self.vpc,
         internet_facing=True,
         cross_zone_enabled=True,
         load_balancer_name="master",
     )
Example #4
    def create_nlb(self, props):
        if "nlb" not in self.output_props:
            nlb = lb.NetworkLoadBalancer(
                self,
                id="nlb_cdk",
                vpc=props['vpc'],
                internet_facing=True,
                cross_zone_enabled=True,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC))

            self.output_props["nlb"] = nlb
            self.output_props["nlb_dns_name"] = nlb.load_balancer_dns_name
Example #5
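# Note: raw_user_data is assumed to be a module-level string (the instance user-data
# script) defined outside this snippet.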
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            self, 'Vpc',
            cidr='172.16.0.0/16',
            max_azs=2,
            nat_gateways=1,
        )

        user_data = ec2.UserData.for_linux()
        user_data.add_commands(raw_user_data)

        role = iam.Role(
            self, 'Ec2SsmRole',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore')
            ],
        )

        asg = autoscaling.AutoScalingGroup(
            self, 'ASG',
            role=role,
            vpc=vpc,
            user_data=user_data,
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.BURSTABLE3_AMD,
                instance_size=ec2.InstanceSize.NANO,
            ),
            machine_image=ec2.AmazonLinuxImage(
              generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            ),
            min_capacity=4,
            max_capacity=4,
            update_type=autoscaling.UpdateType.ROLLING_UPDATE,
        )

        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(80))

        # Only possible with ALB
        # asg.scale_on_request_count('AModestLoad',
        #   target_requests_per_second=1
        # )

        nlb = elbv2.NetworkLoadBalancer(
            self, 'NLB',
            vpc=vpc,
            internet_facing=False,
            cross_zone_enabled=True,
        )

        listener = nlb.add_listener(
            'Listener',
            port=80,
        )

        listener.add_targets(
            'Target',
            port=80,
            deregistration_delay=core.Duration.seconds(10),
            targets=[asg],
        )

        # TODO: replace by CDK construct when available
        service = ec2.CfnVPCEndpointService(
            self, 'Service',
            network_load_balancer_arns=[nlb.load_balancer_arn],
            acceptance_required=False,
        )
        self.endpoint_service = service
Example #6
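# Note: linux_ami (a machine image) and user_data (a user-data script string) are assumed
# to be module-level definitions outside this snippet; CfnParameter, Duration and
# HealthCheck are presumably imported from aws_cdk.core and aws_cdk.aws_autoscaling.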
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # The code that defines your stack goes here

        vpc = ec2.Vpc(
            self,
            "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # configuration will create 3 groups in 2 AZs = 6 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                        name="DB",
                                        cidr_mask=24)
            ],
            nat_gateways=2)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string
        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        user_defined_tags = self.node.try_get_context("tags")

        if user_defined_tags:
            tags = user_defined_tags.split(' ')
            core.Tags.of(asg).add(*tags)

        # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # We must manually modify the inbound rules of the newly created ASG security group to allow access
        # from the NLB IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(18083),
                                            "Allow NLB access WEB UI")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")

        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")
        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        # @todo we need ssl termination
        # listenerUI.add_targets("addTargetGroup",
        #     port=18083,
        #     targets=[asg])
        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
Example #7
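# Note: ctx is assumed to be a context/configuration object exposing vpc_props,
# secrets_key_arn and inbound.services; the __create_nlb_service, __create_cloudmap_service
# and __create_pull_service helpers are defined elsewhere in the same class.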
    def __init__(self, scope: core.Construct, id: str, ctx: object,
                 ecr_repository: ecr.Repository, kinesis_stream: ks.Stream,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.ecr_repository = ecr_repository

        self.vpc = ec2.Vpc.from_vpc_attributes(self, "VPC",
                                               **ctx.vpc_props.dict())

        # CloudWatch Logs Group
        self.log_group = cwl.LogGroup(scope=self, id="logs")

        self.kinesis_stream = kinesis_stream

        # Create a new ECS cluster for our services
        self.cluster = ecs.Cluster(self, vpc=self.vpc, id=f"{id}_cluster")
        cluster_name_output = core.CfnOutput(scope=self,
                                             id="cluster-name-out",
                                             value=self.cluster.cluster_name,
                                             export_name=f"{id}-cluster-name")

        # Create a role for ECS to interact with AWS APIs with standard permissions
        self.ecs_exec_role = iam.Role(
            scope=self,
            id="ecs_logstash-exec_role",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=([
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy")
            ]))
        # Grant ECS additional permissions to decrypt secrets from Secrets Manager that have been encrypted with our custom key
        if getattr(ctx, "secrets_key_arn", None) is not None:
            self.ecs_exec_role.add_to_policy(
                iam.PolicyStatement(actions=["kms:Decrypt"],
                                    effect=iam.Effect.ALLOW,
                                    resources=[ctx.secrets_key_arn]))
        # Grant ECS permissions to log to our log group
        self.log_group.grant_write(self.ecs_exec_role)

        # Load Balancer for Listening Services
        self.load_balancer = elb2.NetworkLoadBalancer(scope=self,
                                                      id=f"{id}-nlb",
                                                      vpc=self.vpc,
                                                      internet_facing=False,
                                                      cross_zone_enabled=True)

        # Create listener services
        service_names = []
        for service_name in getattr(ctx.inbound.services, "nlb", []):
            self.__create_nlb_service(service_name[0], ctx)
            service_names.append(service_name[0])

        for service_name in getattr(ctx.inbound.services, "cloudmap", []):
            self.__create_cloudmap_service(service_name[0], ctx)
            service_names.append(service_name[0])

        for service_name in getattr(ctx.inbound.services, "pull", []):
            self.__create_pull_service(service_name[0], ctx)
            service_names.append(service_name[0])

        service_names_output = core.CfnOutput(
            scope=self,
            id="service-names-out",
            value=",".join(service_names),
            export_name=f"{id}-service-names")
Example #8
File: ec2.py  Project: edsenabr/mtls-demo
    def __init__(self, stack: core.Stack, VPC: ec2.Vpc) -> None:

        bastionSG = ec2.SecurityGroup(
            stack,
            "BastionSecurityGroup",
            vpc=VPC,
            description="Allow ssh access to the bastion host",
            allow_all_outbound=True)

        bastionSG.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                                   "allow ssh access from the world")

        bastionSG.add_ingress_rule(ec2.Peer.any_ipv4(),
                                   ec2.Port.tcp_range(8081, 8083),
                                   "allow http(s) access from the world")

        userData = ec2.UserData.for_linux()
        userData.add_commands("""
            set -v
            apt update
            apt install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common gdebi-core ec2-instance-connect
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu	$(lsb_release -cs)	stable"
            apt install -y docker-ce docker-ce-cli containerd.io
            usermod -a -G docker ubuntu
            curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
            chmod +x /usr/local/bin/docker-compose
            echo 'version: "3.8"' > /home/ubuntu/docker-compose.yaml
            echo 'services:' >> /home/ubuntu/docker-compose.yaml
            echo '  web:' >> /home/ubuntu/docker-compose.yaml
            echo '    image: %s/mtls-demo-web' >> /home/ubuntu/docker-compose.yaml
            echo '    network_mode: "service:proxy"' >> /home/ubuntu/docker-compose.yaml
            echo '    depends_on:' >> /home/ubuntu/docker-compose.yaml
            echo '      - proxy' >> /home/ubuntu/docker-compose.yaml
            echo '    restart: unless-stopped' >> /home/ubuntu/docker-compose.yaml
            echo '  proxy:' >> /home/ubuntu/docker-compose.yaml
            echo '    image: %s/mtls-demo-proxy' >> /home/ubuntu/docker-compose.yaml
            echo '    ports:' >> /home/ubuntu/docker-compose.yaml
            echo '      - "9901:9901"' >> /home/ubuntu/docker-compose.yaml
            echo '      - "8081:8081"' >> /home/ubuntu/docker-compose.yaml
            echo '      - "8082:8082"' >> /home/ubuntu/docker-compose.yaml
            echo '      - "8083:8083"' >> /home/ubuntu/docker-compose.yaml
            echo '      - "8084:8084"' >> /home/ubuntu/docker-compose.yaml
            echo '    restart: unless-stopped' >> /home/ubuntu/docker-compose.yaml
            echo 'networks:' >> /home/ubuntu/docker-compose.yaml
            echo '  public:' >> /home/ubuntu/docker-compose.yaml
            echo '    external: true' >> /home/ubuntu/docker-compose.yaml
            /usr/local/bin/docker-compose -f /home/ubuntu/docker-compose.yaml up
        """ % (stack.node.try_get_context("prefix"),
               stack.node.try_get_context("prefix")))

        # no key installed, use
        #
        #   aws ec2-instance-connect send-ssh-public-key
        #
        # to install a temporary key and gain access to the vm
        bastion = ec2.Instance(
            stack,
            "Bastion",
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.MachineImage.lookup(
                name="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"
            ),
            vpc=VPC,
            security_group=bastionSG,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            user_data=userData)
        core.CfnOutput(stack, "BastionIP", value=bastion.instance_public_ip)
        core.CfnOutput(stack, "BastionInstanceID", value=bastion.instance_id)
        core.CfnOutput(
            stack,
            "BastionSendSSHKeyCommand",
            value=
            "aws ec2-instance-connect send-ssh-public-key --instance-id %s --instance-os-user ubuntu --availability-zone %s --ssh-public-key file://~/.ssh/id_rsa.pub"
            % (bastion.instance_id, bastion.instance_availability_zone))

        nlb = elbv2.NetworkLoadBalancer(
            stack,
            "NLB",
            vpc=VPC,
            internet_facing=True,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
        core.CfnOutput(stack, "NLBAddress", value=nlb.load_balancer_dns_name)
        core.Tag.add(nlb, "stack", "ec2", apply_to_launched_instances=True)

        nlb.add_listener(
            "HTTP",
            port=80,
            default_target_groups=[
                elbv2.NetworkTargetGroup(
                    stack,
                    "HTTPDefaultTargetGroup",
                    port=8081,
                    vpc=VPC,
                    targets=[elbv2.InstanceTarget(bastion.instance_id, 8081)])
            ])

        nlb.add_listener(
            "mTLS",
            port=443,
            default_target_groups=[
                elbv2.NetworkTargetGroup(
                    stack,
                    "mTLSDefaultTargetGroup",
                    port=8083,
                    vpc=VPC,
                    targets=[elbv2.InstanceTarget(bastion.instance_id, 8083)])
            ])
Example #9
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
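        # Assumed to be defined at module level (not shown in this snippet):
        # PUBLIC_ACCESS (bool), NLB_ACCESS_IPV4 (a CIDR string), UDP_LISTEN_PORT (int)
        # and the install_td_agent_user_data() helper used further below.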
        super().__init__(app, id, **kwargs)

        # Create an S3 bucket where we will store the UDP logs
        udp_logs_bucket = s3.Bucket(self, "UDPLogsBucket")

        # Create a VPC using CDK's default VPC architecture. See README for diagram.
        vpc = ec2.Vpc(self, "VPC")

        workers_asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC
                if PUBLIC_ACCESS else ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType("t2.small"),
            machine_image=ec2.AmazonLinuxImage(),
            desired_capacity=1 if PUBLIC_ACCESS else 2)

        # Create a security group that controls access to the NLB > Instances
        # It is important to note that NLB security works differently from Classic ELB or ALB:
        # NLBs do not attach security groups; security controls are managed on the instances themselves!
        allow_udp_sg = ec2.SecurityGroup(
            self,
            "AllowUdpSG",
            vpc=vpc,
            description="Allow UDP listener through Network Load Balancer",
            allow_all_outbound=False,  # the default SG for the ASG already allows all outbound
        )

        # Add rules to the security group for internal access and for the configured NLB_ACCESS_IPV4
        for ipv4 in [NLB_ACCESS_IPV4, vpc.vpc_cidr_block]:
            allow_udp_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(ipv4),
                connection=ec2.Port(
                    string_representation=str(UDP_LISTEN_PORT),
                    protocol=ec2.Protocol.UDP,
                    from_port=UDP_LISTEN_PORT,
                    to_port=UDP_LISTEN_PORT,
                ),
            )
        workers_asg.add_security_group(allow_udp_sg)

        # Add the td-agent to our worker instances using user-data scripts
        # Example of using an external function to modify a resource
        install_td_agent_user_data(workers_asg, udp_logs_bucket,
                                   UDP_LISTEN_PORT)

        # Attach the SSM Managed policy for managing the instance through SSM Sessions
        # This allows us to ditch bastions hosts!
        # This policy also is granting us S3 access for logging, if you remove it you will need to add an IAM role
        # with access to the s3 bucket.
        managed_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="service-role/AmazonEC2RoleforSSM")
        workers_asg.role.add_managed_policy(managed_policy)

        # Create a network load balancer for accepting our UDP logs
        # This will create the required EIPs when configured for Public Access
        lb = elbv2.NetworkLoadBalancer(self,
                                       "LB",
                                       vpc=vpc,
                                       cross_zone_enabled=True,
                                       internet_facing=PUBLIC_ACCESS)

        # Create a listener & target group for our NLB.
        # It is important to note that the TCP protocol will be overridden to UDP shortly
        listener = lb.add_listener("Listener",
                                   port=UDP_LISTEN_PORT,
                                   protocol=elbv2.Protocol.TCP)
        target_group = elbv2.NetworkTargetGroup(self,
                                                vpc=vpc,
                                                id="Target",
                                                port=UDP_LISTEN_PORT,
                                                targets=[workers_asg])
        listener.add_target_groups("TargetGroupUdp", target_group)

        # Workaround for lack of UDP NLB support in CDK.
        # As of writing this CDK does not have support for UDP NLBs
        # TODO - Remove these overrides when support is added
        # https://github.com/awslabs/aws-cdk/issues/3107
        listener.node.find_child("Resource").add_property_override(
            "Protocol", "UDP")
        target_group.node.find_child("Resource").add_property_override(
            "Protocol", "UDP")
Example #10
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
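        # Assumed imports/definitions not shown in this snippet: KeyPair (presumably from
        # the cdk-ec2-key-pair library), the _config project settings object, and the
        # _iam/_autoscaling/_elbv2 aliases for the corresponding aws_cdk modules.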
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)

        self.__key = KeyPair(self,
                             "bastion-keypair",
                             name=_config.Redshift.BASTION_HOST_KEY_PAIR_NAME,
                             description="Key Pair to connect to bastion host",
                             resource_prefix="ara-redshift-bastion")

        self._bastion_sg = ec2.SecurityGroup(self,
                                             id="bastion-sg",
                                             vpc=vpc,
                                             security_group_name="bastion-sg")

        # a proper ip address needs to be configured before stack deployment
        if _config.Redshift.LOCAL_IP is not None:
            self._bastion_sg.add_ingress_rule(
                ec2.Peer.ipv4(_config.Redshift.LOCAL_IP), ec2.Port.tcp(22))

        # Instance Role and SSM Managed Policy
        self._role = iam.Role(
            self,
            "InstanceSSM",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            role_name='bastion-host-role')

        self._role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'secretsmanager:GetResourcePolicy',
                    'secretsmanager:GetSecretValue',
                    'secretsmanager:DescribeSecret',
                    'secretsmanager:ListSecretVersionIds'
                ],
                resources=[
                    stack.format_arn(
                        service='secretsmanager',
                        resource='secret:ec2-private-key/' +
                        _config.Redshift.BASTION_HOST_KEY_PAIR_NAME + '*')
                ]))

        self._role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        bastion_asg = _autoscaling.AutoScalingGroup(
            self,
            'bastion-autoscaling',
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.NANO),
            machine_image=ec2.AmazonLinuxImage(),
            vpc=vpc,
            key_name=_config.Redshift.BASTION_HOST_KEY_PAIR_NAME,
            role=self._role,
            security_group=self._bastion_sg,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            cooldown=core.Duration.minutes(1),
            min_capacity=1,
            max_capacity=3,
            spot_price="0.005")

        self.__bastion_nlb = _elbv2.NetworkLoadBalancer(
            self,
            'bastion_elb',
            vpc=vpc,
            internet_facing=True,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))

        listener = self.__bastion_nlb.add_listener("Listener", port=22)
        listener.add_targets("Target", port=22, targets=[bastion_asg])
Example #11
    def __init__(self, scope: core.Construct, id: str, pub_hosted_zone: object,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        sts = boto3.client("sts")
        deploy_account_id = sts.get_caller_identity()["Account"]
        deploy_region = sts.meta.region_name

        #TODO ADD an ASG
        vpc = ec2.Vpc(
            self,
            "iais-public",
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private",
                                        subnet_type=ec2.SubnetType.ISOLATED),
            ])

        sg = ec2.SecurityGroup(self,
                               f"iais-sg-{str}",
                               vpc=vpc,
                               allow_all_outbound=True,
                               description="For HTTPS access.")

        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(443))

        self.sg = sg
        self.vpc = vpc

        instance_ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        role = iam.Role(self,
                        "iais-web-server-roles",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRekognitionFullAccess"))

        instance = ec2.Instance(self,
                                "iais-web-server-instance",
                                instance_type=ec2.InstanceType("t2.micro"),
                                machine_image=instance_ami,
                                vpc=vpc,
                                role=role,
                                security_group=sg)

        instance_target = targets.InstanceIdTarget(
            instance_id=instance.instance_id, port=443)

        lb = elbv2.NetworkLoadBalancer(self,
                                       f"iais-lb-{str}",
                                       vpc=vpc,
                                       internet_facing=True)

        lb_tg = elbv2.NetworkTargetGroup(self,
                                         vpc=vpc,
                                         id=f"iais-tg-{str}",
                                         port=443,
                                         targets=[instance_target])

        lb_listener = lb.add_listener(f"iais-listener-{str}",
                                      port=443,
                                      default_target_groups=[lb_tg])

        r53.ARecord(self,
                    "AliasRecord",
                    zone=pub_hosted_zone,
                    target=r53.RecordTarget.from_alias(
                        r53t.LoadBalancerTarget(lb)))

        r53.ARecord(self,
                    "AliasRecordWww",
                    zone=pub_hosted_zone,
                    record_name="www.imageaisearch.com",
                    target=r53.RecordTarget.from_alias(
                        r53t.LoadBalancerTarget(lb)))

        secrets_man_policy = iam.Policy(
            self,
            "iais",
            roles=[role],
            policy_name="iais-web-server-secrets-manager",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "secretsmanager:GetResourcePolicy",
                        "secretsmanager:GetSecretValue",
                        "secretsmanager:DescribeSecret",
                        "secretsmanager:ListSecretVersionIds"
                    ],
                    resources=[
                        f"arn:aws:secretsmanager:{deploy_region}:{deploy_account_id}:secret:DJANGO_SECRET_KEY-mHAOZX"
                    ])
            ])

        secrets_man_policy.attach_to_role(role)
Example #12
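# Note: BASTION_SECURITY_GROUP_ID is assumed to be a module-level constant holding the id
# of an existing bastion security group, defined outside this snippet.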
    def __init__(self, scope: core.Construct, id: str, vpc_id: str,
                 key_name: str, instance_role_arn: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc.from_lookup(self, "VPC", is_default=False, vpc_id=vpc_id)
        role = iam.Role.from_role_arn(self,
                                      'Role',
                                      instance_role_arn,
                                      mutable=False)

        user_data_script = open("./fortune/userdata.sh", "rb").read()
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(str(user_data_script, 'utf-8'))

        app_sg = ec2.SecurityGroup(
            self,
            id="FortuneAppSG",
            vpc=vpc,
            security_group_name="FortuneAppSG",
        )
        bastion_sg = ec2.SecurityGroup.from_security_group_id(
            self, "BastionSG", security_group_id=BASTION_SECURITY_GROUP_ID)
        app_sg.add_ingress_rule(peer=bastion_sg,
                                connection=ec2.Port.all_traffic(),
                                description="AllowBastionSG")
        app_sg.add_ingress_rule(ec2.Peer.ipv4('10.22.0.0/16'),
                                ec2.Port.tcp(80),
                                description="TrustedNetwork")

        asg = autoscaling.AutoScalingGroup(
            scope=self,
            id="ASG",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            user_data=user_data,
            role=role,
            key_name=key_name,
            min_capacity=1,
            desired_capacity=1,
            max_capacity=3,
            spot_price="0.0132",  # TODO use Launch Template to setup Spot.
            security_group=app_sg,
            group_metrics=[autoscaling.GroupMetrics.all()],
            health_check=autoscaling.HealthCheck.elb(
                grace=core.Duration.seconds(10)),
        )

        core.Tags.of(asg).add("Name",
                              "fortune-app/ASG",
                              apply_to_launched_instances=True)
        core.Tags.of(asg).add("project",
                              "fortune",
                              apply_to_launched_instances=True)

        # private NLB
        nlb = elbv2.NetworkLoadBalancer(self,
                                        "FortunePrivateNLB",
                                        cross_zone_enabled=True,
                                        vpc=vpc,
                                        internet_facing=False)

        health_check = elbv2.HealthCheck(
            path="/",
            protocol=elbv2.Protocol.HTTP,
            healthy_threshold_count=3,
            unhealthy_threshold_count=3,
            interval=core.Duration.seconds(10),
        )

        listener = nlb.add_listener("PrivateListener80", port=80)
        listener.add_targets(
            "Target",
            port=80,
            targets=[asg],
            deregistration_delay=core.Duration.seconds(10),
            health_check=health_check,
        )

        # asg.scale_on_outgoing_bytes(id="ScaleOnNetworkOut", target_bytes_per_second=100000)
        core.CfnOutput(self,
                       "NetworkLoadBalancer",
                       export_name="NetworkLoadBalancer",
                       value=nlb.load_balancer_dns_name)
Example #13
    def __init__(self, scope: core.Construct, id: str, vpc, env,
                 **kwargs) -> None:
        super().__init__(scope, id, env=env, **kwargs)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
            description="Specify the instance type you want").value_as_string

        key_name = CfnParameter(
            self,
            "ssh key",
            type="String",
            default="key_ireland",
            description="Specify your SSH key").value_as_string

        # Create Bastion Server
        bastion = ec2.BastionHostLinux(
            self,
            "Bastion",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name="BastionHostLinux",
            instance_type=ec2.InstanceType(
                instance_type_identifier="t2.micro"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                                "Internet access SSH")

        # Create NLB
        nlb = elb.NetworkLoadBalancer(self,
                                      "emq-elb",
                                      vpc=vpc,
                                      internet_facing=True,
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        listener = nlb.add_listener("port1883", port=1883)

        # Create Autoscaling Group with desired 2*EC2 hosts
        asg = autoscaling.AutoScalingGroup(
            self,
            "emq-asg",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key_name,
            user_data=ec2.UserData.custom(user_data),
            health_check=HealthCheck.elb(grace=Duration.seconds(60)),
            desired_capacity=2,
            min_capacity=2,
            max_capacity=4)

        # An NLB cannot be associated with a security group, so the NLB object has no Connections object.
        # We must manually modify the inbound rules of the newly created ASG security group to allow access
        # from the NLB IPs only.
        asg.connections.allow_from_any_ipv4(
            ec2.Port.tcp(1883),
            "Allow NLB access 1883 port of EC2 in Autoscaling Group")

        asg.connections.allow_from(bastion, ec2.Port.tcp(22),
                                   "Allow SSH from the bastion only")

        listener.add_targets("addTargetGroup", port=1883, targets=[asg])

        core.CfnOutput(self, "Output", value=nlb.load_balancer_dns_name)
Example #14
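# Note: this snippet presumably imports aws_ecs as ecs, aws_ecr_assets as ecr_assets and
# aws_elasticloadbalancingv2 as elbv2 in addition to core and ec2.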
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, *, slaves=2, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster = ecs.Cluster(self, "cluster", vpc=vpc)

        locust_asset = ecr_assets.DockerImageAsset(self, 'locust', directory="docker", file="app/Dockerfile")

        master_task = ecs.FargateTaskDefinition(
            self,
            "mastert",
            cpu=512,
            memory_limit_mib=1024
        )

        sg_slave = ec2.SecurityGroup(self, "sgslave", vpc=vpc, allow_all_outbound=True)

        sg_master = ec2.SecurityGroup(self, "sgmaster", vpc=vpc, allow_all_outbound=True)
        sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8089))
        sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5557))

        master_container = master_task.add_container(
            "masterc",
            image=ecs.ContainerImage.from_docker_image_asset(locust_asset),
            logging=ecs.LogDriver.aws_logs(stream_prefix="master"),
            command=["-f", "/mnt/locust/locustfile.py",  "--master"]
        )

        master_container.add_port_mappings(ecs.PortMapping(container_port=8089, host_port=8089))
        master_container.add_port_mappings(ecs.PortMapping(container_port=5557, host_port=5557))

        master_service = ecs.FargateService(
            self, "masters",
            cluster=cluster,
            task_definition=master_task,
            desired_count=1,
            assign_public_ip=True,
            security_group=sg_master
        )

        nlb = elbv2.NetworkLoadBalancer(
            self,
            "nbalancer",
            internet_facing=True,
            vpc=vpc
        )

        listener_master_console = nlb.add_listener(
            "masterconsole",
            port=8089,
            protocol=elbv2.Protocol("TCP")
        )

        listener_console = nlb.add_listener(
            "master",
            port=5557,
            protocol=elbv2.Protocol("TCP")
        )

        listener_master_console.add_targets(
            "consoletarget",
            deregistration_delay=core.Duration.seconds(1),
            port=8089,
            targets=[master_service.load_balancer_target(
                container_name="masterc",
                container_port=8089
            )],
            health_check=elbv2.HealthCheck(
                healthy_threshold_count=2,
                unhealthy_threshold_count=2,
                timeout=core.Duration.seconds(10)
            )
        )

        listener_console.add_targets(
            "mastertarget",
            deregistration_delay=core.Duration.seconds(1),
            port=5557,
            targets=[master_service.load_balancer_target(
                container_name="masterc",
                container_port=5557
            )],
            health_check=elbv2.HealthCheck(
                healthy_threshold_count=2,
                unhealthy_threshold_count=2,
                timeout=core.Duration.seconds(10)
            )
        )

        slave_task = ecs.FargateTaskDefinition(
            self,
            "slavet",
            cpu=2048,
            memory_limit_mib=4096
        )

        slave_task.add_container(
            "slavec",
            image=ecs.ContainerImage.from_docker_image_asset(locust_asset),
            logging=ecs.LogDriver.aws_logs(stream_prefix="slave"),
            command=["-f", "/mnt/locust/locustfile.py", "--worker", "--master-host", nlb.load_balancer_dns_name]
        )

        ecs.FargateService(
            self, "slaves",
            cluster=cluster,
            task_definition=slave_task,
            desired_count=slaves,
            assign_public_ip=True,
            security_group=sg_slave
        )

        core.CfnOutput(self, "LocustWebConsole", value="http://" + nlb.load_balancer_dns_name + ":8089")
Example #15
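# Note: several names used below are assumed to be defined elsewhere in the project:
# r53_zone_name, loadbalancer_dnsname, numEmqx, numLg, linux_ami, user_data, and the
# setup_emqx/setup_etcd/setup_loadgen/setup_monitoring methods; target presumably aliases
# aws_elasticloadbalancingv2_targets and r53_targets aliases aws_route53_targets.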
    def __init__(self, scope: core.Construct, construct_id: str, env, **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)
        
        # The code that defines your stack goes here
        if self.node.try_get_context("tags"):
            self.user_defined_tags = self.node.try_get_context("tags").split(' ')
        else:
            self.user_defined_tags = None

        vpc = ec2.Vpc(self, "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
            # configuration will create 3 groups in 2 AZs = 6 subnets.
            subnet_configuration=[ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PUBLIC,
                name="Public",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.PRIVATE,
                name="Private",
                cidr_mask=24
            ), ec2.SubnetConfiguration(
                subnet_type=ec2.SubnetType.ISOLATED,
                name="DB",
                cidr_mask=24
            )
            ],
            nat_gateways=2
            )
        self.vpc = vpc


        # Route53
        int_zone = r53.PrivateHostedZone(self, r53_zone_name,
                                         zone_name = 'int.emqx',
                                         vpc = vpc
        )

        self.int_zone = int_zone

        # Define cfn parameters
        # ec2_type = CfnParameter(self, "ec2-instance-type",
        #     type="String", default="m5.2xlarge",
        #     description="Specify the instance type you want").value_as_string
        
        key_name = CfnParameter(self, "ssh key",
            type="String", default="key_ireland",
            description="Specify your SSH key").value_as_string

        sg = ec2.SecurityGroup(self, id = 'sg_int', vpc = vpc)
        self.sg = sg

        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH from anywhere')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(1883), 'MQTT TCP Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8883), 'MQTT TCP/TLS Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(14567), 'MQTT Quic Port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(18083), 'WEB UI')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4369), 'EMQX dist port 1')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4370), 'EMQX dist port 2')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8081), 'EMQX dashboard')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2379), 'etcd client port')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2380), 'etcd peer port')

         # Create Bastion Server
        bastion = ec2.BastionHostLinux(self, "Bastion",
                                       vpc=vpc,
                                       subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                       instance_name="BastionHostLinux",
                                       instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"))

        bastion.instance.instance.add_property_override("KeyName", key_name)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")
    
        # Create NLB
        nlb = elb.NetworkLoadBalancer(self, "emq-elb",
                                      vpc=vpc,
                                      internet_facing=False, 
                                      cross_zone_enabled=True,
                                      load_balancer_name="emq-nlb")

        r53.ARecord(self, "AliasRecord",
                    zone = int_zone,
                    record_name = loadbalancer_dnsname,
                    target = r53.RecordTarget.from_alias(r53_targets.LoadBalancerTarget(nlb))
                    )

        self.nlb = nlb

        listener = nlb.add_listener("port1883", port=1883)
        listenerTLS = nlb.add_listener("port8883", port=8883) # TLS, emqx termination
        listenerQuic = nlb.add_listener("port14567", port=14567, protocol=elbv2.Protocol.UDP)
        listenerUI = nlb.add_listener("port80", port=80)

        # Create Autoscaling Group with desired 2*EC2 hosts
        # asg = autoscaling.AutoScalingGroup(self, "emq-asg",
        #                                    vpc=vpc,
        #                                    vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        #                                    instance_type=ec2.InstanceType(
        #                                        instance_type_identifier=ec2_type),
        #                                    machine_image=linux_ami,
        #                                    security_group = sg,
        #                                    key_name=key_name,
        #                                    user_data=ec2.UserData.custom(user_data),
        #                                    health_check=HealthCheck.elb(grace=Duration.seconds(60)),
        #                                    desired_capacity=3,
        #                                    min_capacity=2,
        #                                    max_capacity=4
        # )

        # if self.user_defined_tags:
        #     core.Tags.of(asg).add(*self.user_defined_tags)

        # # NLB cannot associate with a security group therefore NLB object has no Connection object
        # # Must modify manuall inbound rule of the newly created asg security group to allow access
        # # from NLB IP only
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(1883), "Allow NLB access 1883 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(18083), "Allow NLB access WEB UI")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(8081), "Allow emqx cluster dashboard access")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from_any_ipv4(
        #     ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
        # asg.connections.allow_from(bastion,
        #     ec2.Port.tcp(22), "Allow SSH from the bastion only")

        self.setup_emqx(numEmqx, vpc, int_zone, sg, key_name)

        listener.add_targets('ec2',
                             port=1883,
                             targets=[target.InstanceTarget(x)
                                      for x in self.emqx_vms])
        # @todo we need ssl termination
        listenerUI.add_targets('ec2',
                               port=18083,
                               targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerQuic.add_targets('ec2',
                                 port=14567,
                                 protocol=elbv2.Protocol.UDP,
                                 targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        listenerTLS.add_targets('ec2',
                                port=8883,
                                targets=[ target.InstanceTarget(x)
                                   for x in self.emqx_vms])

        """ db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=vpc,
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=core.Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self, "para-group-mysql",
                parameter_group_name="default.mysql5.7"),
            )

        asg_security_groups = asg.connections.security_groups
        for asg_sg in asg_security_groups:
            db_mysql.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access MySQL") """

        #self.setup_monitoring()

        self.setup_etcd(vpc, int_zone, sg, key_name)
        self.setup_loadgen(numLg, vpc, int_zone, sg, key_name, nlb.load_balancer_dns_name)

        self.setup_monitoring()

        core.CfnOutput(self, "Output",
            value=nlb.load_balancer_dns_name)
        core.CfnOutput(self, "SSH Entrypoint",
                       value=bastion.instance_public_ip)
        core.CfnOutput(self, "SSH cmds",
                       value="ssh -A -l ec2-user %s -L8888:%s:80 -L 9999:%s:80 -L 13000:%s:3000"
                       % (bastion.instance_public_ip, nlb.load_balancer_dns_name, self.mon_lb, self.mon_lb)
        )
Example #16
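# Note: Sequence comes from typing, and Utils.get_subnet_cidr_blocks() is assumed to be a
# project helper returning the CIDR blocks of the given subnets.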
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 stack_name: str,
                 task_definition_cpu: int,
                 task_definition_memory_limit_mib: int,
                 docker_image_name: str,
                 container_port: int,
                 desired_container_count: int,
                 private_subnets: Sequence[aws_ec2.Subnet] = None,
                 public_subnets: Sequence[aws_ec2.Subnet] = None,
                 private_security_group: aws_ec2.SecurityGroup = None,
                 public_security_group: aws_ec2.SecurityGroup = None,
                 vpc: aws_ec2.Vpc = None,
                 fargate_cluster: aws_ecs.Cluster = None,
                 authorizer_lambda_arn: str = None,
                 authorizer_lambda_role_arn: str = None,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        # Role
        self.role = aws_iam.Role(
            self,
            'Role',
            assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AmazonECSTaskExecutionRolePolicy')
            ],
            inline_policies={
                id:
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            'kms:Encrypt',
                            'kms:Decrypt',
                            'kms:ReEncrypt*',
                            'kms:GenerateDataKey*',
                            'kms:DescribeKey',
                            'ec2:CreateNetworkInterface',
                            'ec2:DescribeNetworkInterfaces',
                            'ec2:DeleteNetworkInterface',
                            # Remaining actions from https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                            'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                            'elasticloadbalancing:DeregisterTargets',
                            'elasticloadbalancing:Describe*',
                            'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                            'elasticloadbalancing:RegisterTargets',
                            'ec2:Describe*',
                            'ec2:AuthorizeSecurityGroupIngress'
                        ],
                        resources=['*'])
                ])
            })
        self.role.assume_role_policy.add_statements(
            aws_iam.PolicyStatement(
                actions=['sts:AssumeRole'],
                principals=[
                    aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
                ]))

        # Set Defaults if parameters are None
        if vpc is None:
            vpc = aws_ec2.Vpc(self, 'Vpc')

        if private_subnets is None:
            private_subnets = vpc.private_subnets

        if public_subnets is None:
            public_subnets = vpc.public_subnets

        if public_security_group is None:
            public_security_group = aws_ec2.SecurityGroup(
                self, 'PublicSecurityGroup', vpc=vpc, allow_all_outbound=True)
            # Allow inbound HTTP traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=80))
            # Allow inbound HTTPS traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=443))

        if private_security_group is None:
            private_security_group = aws_ec2.SecurityGroup(
                self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)

            public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
                public_subnets)

            # Create an ingress rule for each of the NLB subnets' CIDR ranges and add the rules to the ECS
            # service's security group. This allows requests from the NLB to reach the ECS service, i.e. it
            # permits inbound traffic from the public subnets on the container port.
            for cidr_block in public_subnet_cidr_blocks:
                private_security_group.add_ingress_rule(
                    peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                    connection=aws_ec2.Port.tcp(port=container_port))

        if fargate_cluster is None:
            fargate_cluster = aws_ecs.Cluster(
                self,
                'FargateCluster',
            )

        task_def = aws_ecs.FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=task_definition_cpu,
            memory_limit_mib=task_definition_memory_limit_mib,
            task_role=self.role,
            execution_role=self.role)

        container = aws_ecs.ContainerDefinition(
            self,
            'Container',
            image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
            task_definition=task_def,
            logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
        container.add_port_mappings(
            aws_ecs.PortMapping(container_port=container_port,
                                protocol=aws_ecs.Protocol.TCP))

        ecs_service = aws_ecs.FargateService(
            self,
            'FargateService',
            cluster=fargate_cluster,
            task_definition=task_def,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
            security_group=private_security_group,
            desired_count=desired_container_count)

        target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
            self,
            'TargetGroup',
            port=80,  # Target group traffic port; the health check below uses plain TCP
            health_check=aws_elasticloadbalancingv2.HealthCheck(
                protocol=aws_elasticloadbalancingv2.Protocol.TCP),
            targets=[ecs_service],
            vpc=vpc)

        nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
            self,
            'NetworkLoadBalancer',
            vpc=vpc,
            internet_facing=False,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
        )
        nlb.add_listener(
            id='Listener',
            port=80,  # TCP listener carrying the HTTP traffic
            default_target_groups=[target_group])

        # nlb.log_access_logs(  # todo:  add this later when you have time to research the correct bucket policy.
        #     bucket=aws_s3.Bucket(
        #         self, 'LoadBalancerLogBucket',
        #         bucket_name='load-balancer-logs',
        #         public_read_access=False,
        #         block_public_access=aws_s3.BlockPublicAccess(
        #             block_public_policy=True,
        #             restrict_public_buckets=True
        #         )
        #     )
        # )

        # Dependencies
        ecs_service.node.add_dependency(nlb)

        # API Gateway
        rest_api = aws_apigateway.RestApi(self, stack_name)
        resource = rest_api.root.add_resource(
            path_part='{proxy+}',
            default_method_options=aws_apigateway.MethodOptions(
                request_parameters={'method.request.path.proxy': True}))

        token_authorizer = None
        if authorizer_lambda_arn and authorizer_lambda_role_arn:
            token_authorizer = aws_apigateway.TokenAuthorizer(  #todo: make this a parameter?
                self,
                'JwtTokenAuthorizer',
                results_cache_ttl=core.Duration.minutes(5),
                identity_source='method.request.header.Authorization',
                assume_role=aws_iam.Role.from_role_arn(
                    self,
                    'AuthorizerLambdaInvokationRole',
                    role_arn=authorizer_lambda_role_arn),
                handler=aws_lambda.Function.from_function_arn(
                    self,
                    'AuthorizerLambda',
                    function_arn=authorizer_lambda_arn))

        resource.add_method(
            http_method='ANY',
            authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
            authorizer=token_authorizer,
            integration=aws_apigateway.HttpIntegration(
                url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
                http_method='ANY',
                proxy=True,
                options=aws_apigateway.IntegrationOptions(
                    request_parameters={
                        'integration.request.path.proxy':
                        'method.request.path.proxy'
                    },
                    connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                    vpc_link=aws_apigateway.VpcLink(
                        self,
                        'VpcLink',
                        description=
                        f'API Gateway VPC Link to internal NLB for {stack_name}',
                        vpc_link_name=stack_name,
                        targets=[nlb]))))
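
Note on Example #16: the constructor calls Utils.get_subnet_cidr_blocks, a project-local helper that is not part of the snippet. A minimal sketch of what that helper might look like, assuming each subnet exposes its CIDR range via the ISubnet.ipv4_cidr_block attribute (the class and method names simply mirror the call site):

# Hypothetical helper assumed by Example #16; not taken from the original code.
from typing import List, Sequence

from aws_cdk import aws_ec2


class Utils:
    @staticmethod
    def get_subnet_cidr_blocks(subnets: Sequence[aws_ec2.ISubnet]) -> List[str]:
        # Collect the IPv4 CIDR range of every subnet (used above to open the
        # container port to the NLB's public subnets).
        return [subnet.ipv4_cidr_block for subnet in subnets]
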
Example #17
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 vpc,
                 target_url="",
                 slaves=2,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster = ecs.Cluster(self, "cluster", vpc=vpc)

        locust_asset = ecr_assets.DockerImageAsset(self,
                                                   'locust',
                                                   directory="docker",
                                                   file="app/Dockerfile")

        master_task = ecs.FargateTaskDefinition(self, "mastert")

        sg_slave = ec2.SecurityGroup(self,
                                     "sgslave",
                                     vpc=vpc,
                                     allow_all_outbound=True)

        sg_master = ec2.SecurityGroup(self,
                                      "sgmaster",
                                      vpc=vpc,
                                      allow_all_outbound=True)
        sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8089))
        sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5557))

        master_container = master_task.add_container(
            "masterc",
            image=ecs.ContainerImage.from_ecr_repository(
                locust_asset.repository, locust_asset.image_uri[-64:]),
            logging=ecs.LogDriver.aws_logs(stream_prefix="master"),
            environment={
                "LOCUST_MODE": "master",
                "TARGET_URL": target_url
            })

        master_container.add_port_mappings(
            ecs.PortMapping(container_port=8089, host_port=8089))
        master_container.add_port_mappings(
            ecs.PortMapping(container_port=5557, host_port=5557))

        master_service = ecs.FargateService(self,
                                            "masters",
                                            cluster=cluster,
                                            task_definition=master_task,
                                            desired_count=1,
                                            assign_public_ip=True,
                                            security_group=sg_master)

        nlb = elbv2.NetworkLoadBalancer(self,
                                        "nbalancer",
                                        internet_facing=True,
                                        vpc=vpc)

        listener_master_console = nlb.add_listener(
            "masterconsole", port=8089, protocol=elbv2.Protocol.TCP)

        listener_console = nlb.add_listener("master",
                                            port=5557,
                                            protocol=elbv2.Protocol.TCP)

        listener_master_console.add_targets(
            "consoletarget",
            deregistration_delay=core.Duration.seconds(1),
            port=8089,
            targets=[
                master_service.load_balancer_target(container_name="masterc",
                                                    container_port=8089)
            ],
            health_check=elbv2.HealthCheck(healthy_threshold_count=2,
                                           unhealthy_threshold_count=2,
                                           timeout=core.Duration.seconds(10)))

        listener_console.add_targets(
            "mastertarget",
            deregistration_delay=core.Duration.seconds(1),
            port=5557,
            targets=[
                master_service.load_balancer_target(container_name="masterc",
                                                    container_port=5557)
            ],
            health_check=elbv2.HealthCheck(healthy_threshold_count=2,
                                           unhealthy_threshold_count=2,
                                           timeout=core.Duration.seconds(10)))

        slave_task = ecs.FargateTaskDefinition(self, "slavet")

        slave_task.add_container(
            "slavec",
            image=ecs.ContainerImage.from_ecr_repository(
                locust_asset.repository, locust_asset.image_uri[-64:]),
            logging=ecs.LogDriver.aws_logs(stream_prefix="slave"),
            environment={
                "LOCUST_MODE": "slave",
                "TARGET_URL": target_url,
                "LOCUST_MASTER_HOST": nlb.load_balancer_dns_name
            })

        ecs.FargateService(self,
                           "slaves",
                           cluster=cluster,
                           task_definition=slave_task,
                           desired_count=slaves,
                           assign_public_ip=True,
                           security_group=sg_slave)

        core.CfnOutput(self,
                       "LocustWebConsole",
                       value="http://" + nlb.load_balancer_dns_name + ":8089")