def create_elb(self):
        """Create an internet-facing, cross-zone classic load balancer.

        The balancer health-checks port 80, targets this stack's auto
        scaling group, and exposes a listener on external port 80.
        """
        balancer = elb.LoadBalancer(
            self,
            guid('ELB-'),
            vpc=self.vpc,
            cross_zone=True,
            internet_facing=True,
            health_check=elb.HealthCheck(port=80),
        )
        balancer.add_target(self.auto_scaling_group)
        balancer.add_listener(external_port=80)
        return balancer
Example #2
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision a two-AZ VPC with an SSH security group, a three-node
        auto scaling group behind a classic ELB, and a standalone public
        EC2 instance."""
        super().__init__(scope, id, **kwargs)

        # Two-AZ VPC shared by everything in this stack.
        network = ec2.Vpc(self, "MyVpc", max_azs=2)

        # Security group opening SSH (22) to any IPv4 address.
        ssh_sg = ec2.SecurityGroup(
            self, "SG",
            description='Allow ssh access to ec2 instances',
            vpc=network,
        )
        ssh_sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(22),
        )

        # t2.micro Amazon Linux fleet pinned at three instances.
        fleet = autoscaling.AutoScalingGroup(
            self, "ASG",
            vpc=network,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(),
            desired_capacity=3,
        )

        # Internet-facing classic ELB health-checking port 80, fronting the
        # fleet above.
        balancer = elb.LoadBalancer(
            self, "LB",
            vpc=network,
            internet_facing=True,
            health_check={"port": 80},
        )
        balancer.add_target(fleet)

        # A standalone t2.micro in a public subnet with SSH access and a
        # pre-existing key pair.
        ec2.Instance(
            self, "EC2INSTANCE",
            vpc=network,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(),
            vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC},
            security_group=ssh_sg,
            key_name="MyNVKeyPair",
        )

        http_listener = balancer.add_listener(external_port=80)
        http_listener.connections.allow_default_port_from_any_ipv4("Open to the world")
Example #3
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        vpc: aws_ec2.IVpc,
        instance_port: int,
        internet_facing: bool,
        subnets: typing.List[aws_ec2.ISubnet],
        targets: typing.List[aws_elb.ILoadBalancerTarget],
        ssl_certificate_id: typing.Optional[str],
        allow_connections_from: typing.Optional[typing.List[
            aws_ec2.IConnectable]] = None,
    ) -> None:
        """Classic ELB construct: HTTP listener on 80, plus an HTTPS
        listener on 443 when a certificate id is supplied. Instances are
        health-checked on /healthz over HTTP at ``instance_port``; TLS (if
        any) terminates at the balancer, with plain HTTP to the backends."""
        super().__init__(scope, id)

        probe = aws_elb.HealthCheck(
            port=instance_port,
            path='/healthz',
            protocol=aws_elb.LoadBalancingProtocol.HTTP,
            healthy_threshold=2,
            interval=core.Duration.seconds(amount=10),
            timeout=core.Duration.seconds(amount=5),
            unhealthy_threshold=3,
        )
        self.elb = aws_elb.LoadBalancer(
            scope=self,
            id='elb',
            vpc=vpc,
            internet_facing=internet_facing,
            subnet_selection=aws_ec2.SubnetSelection(subnets=subnets),
            targets=targets,
            health_check=probe,
        )

        # Both listeners forward to the same backend port over HTTP and
        # share the same ingress restrictions.
        backend = dict(
            internal_port=instance_port,
            internal_protocol=aws_elb.LoadBalancingProtocol.HTTP,
            allow_connections_from=allow_connections_from,
        )
        self.elb.add_listener(
            external_port=80,
            external_protocol=aws_elb.LoadBalancingProtocol.HTTP,
            **backend,
        )
        if ssl_certificate_id:
            self.elb.add_listener(
                external_port=443,
                external_protocol=aws_elb.LoadBalancingProtocol.HTTPS,
                ssl_certificate_id=ssl_certificate_id,
                **backend,
            )
Example #4
0
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        """Launch a t2.micro auto scaling group in the account's default VPC
        behind an internet-facing classic ELB listening on HTTP/80."""
        super().__init__(scope, construct_id, **kwargs)

        # Latest first-generation Amazon Linux, x86_64, standard edition,
        # general-purpose storage.
        linux_ami = ec2.MachineImage.latest_amazon_linux(
            cpu_type=ec2.AmazonLinuxCpuType.X86_64,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE,
        )

        # Import the default VPC rather than creating a new one.
        default_vpc = ec2.Vpc.from_lookup(self, "vpc", is_default=True)

        # Security group attached to the ASG instances; egress fully open.
        instance_sg = ec2.SecurityGroup(
            self,
            "myec2sg",
            vpc=default_vpc,
            security_group_name="skull-sg",
            description="sg for ec2 cdk example",
            allow_all_outbound=True,
        )

        # t2.micro fleet running the AMI above.
        fleet = autoscaling.AutoScalingGroup(
            self,
            "autoscaling",
            instance_type=ec2.InstanceType("t2.micro"),
            machine_image=linux_ami,
            vpc=default_vpc,
            security_group=instance_sg,
        )

        # Public classic load balancer with an HTTP/80 listener, targeting
        # the fleet.
        classic_lb = elb.LoadBalancer(
            self, "loadbalancer", internet_facing=True, vpc=default_vpc
        )
        classic_lb.add_listener(
            external_port=80, external_protocol=elb.LoadBalancingProtocol.HTTP
        )
        classic_lb.add_target(fleet)
Example #5
0
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        """Minimal ASG-behind-ELB stack: fresh VPC, t2.micro Amazon Linux
        fleet, and a world-open classic load balancer on HTTP/80."""
        super().__init__(app, id, **kwargs)

        network = ec2.Vpc(self, "VPC")

        fleet = autoscaling.AutoScalingGroup(
            self, "ASG",
            vpc=network,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(),
        )

        # Internet-facing classic ELB health-checking the fleet on port 80.
        balancer = elb.LoadBalancer(
            self, "LB",
            vpc=network,
            internet_facing=True,
            health_check={"port": 80},
        )
        balancer.add_target(fleet)

        http = balancer.add_listener(external_port=80)
        http.connections.allow_default_port_from_any_ipv4("Open to the world")
Example #6
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Build a "Kubernetes the real hard way" environment.

        Creates: a VPC with public/private subnets; a bastion ASG behind a
        TCP/22 classic ELB with a Route53 alias record; etcd, master and
        worker ASGs in the private subnets; public and private classic ELBs
        fronting the kube-apiservers on 6443; and the full mesh of security
        groups plus Name tags.

        NOTE(review): depends on module-level configuration names defined
        elsewhere in this file (default_tags, vpc_cidr, aws_region,
        ami_region_map, zone_fqdn, ssh_key_pair, myipv4, pod_cidr,
        tag_project and the *_min/max/desired_capacity and *_instance_type
        settings) — confirm they are in scope before reuse.
        """
        super().__init__(scope, id, tags=default_tags, **kwargs)

        # VPC
        vpc = ec2.Vpc(
            self,
            'k8s-real-hard-way-vpc',
            cidr=vpc_cidr,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    cidr_mask=24,
                    name='Public',
                    subnet_type=ec2.SubnetType.PUBLIC,

                ),
                ec2.SubnetConfiguration(
                    cidr_mask=24,
                    name='Private',
                    subnet_type=ec2.SubnetType.PRIVATE
                )
            ]
        )

        # Ubuntu AMI from dict mapping
        ubuntu_ami = ec2.GenericLinuxImage(
            ami_map={
                aws_region: ami_region_map.get(aws_region)
            }
        )

        # Get HostedZone ID from HostedZone Name
        zoneid = route53.HostedZone.from_lookup(
            self,
            "k8s-real-hard-way-zone",
            domain_name=zone_fqdn
        )
        zoneid_str = zoneid.hosted_zone_id

        # IAM Policy for Bastion Instance Profile
        iampolicystatement = iam.PolicyStatement(
            actions=[
                "ec2:CreateRoute",
                "ec2:CreateTags",
                "ec2:DescribeAutoScalingGroups",
                "autoscaling:DescribeAutoScalingInstances",
                "ec2:DescribeRegions",
                "ec2:DescribeRouteTables",
                "ec2:DescribeInstances",
                "ec2:DescribeTags",
                "elasticloadbalancing:DescribeLoadBalancers",
                "route53:ListHostedZonesByName"
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                "*"
            ]
        )
        iampolicystatement_route53 = iam.PolicyStatement(
            actions=[
                "route53:ChangeResourceRecordSets"
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                # zoneid_str[1:] drops the leading "/" of the
                # "/hostedzone/<ID>" form so the ARN concatenates cleanly.
                "arn:aws:route53:::" + zoneid_str[1:]
            ]
        )
        # BASTION HOST
        # AutoScalingGroup
        bastion = autoscaling.AutoScalingGroup(
            self,
            "bastion",
            vpc=vpc,
            min_capacity=bastion_min_capacity,
            max_capacity=bastion_max_capacity,
            desired_capacity=bastion_desired_capacity,
            instance_type=ec2.InstanceType(bastion_instance_type),
            machine_image=ec2.AmazonLinuxImage(),
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        bastion.add_to_role_policy(iampolicystatement)
        bastion.add_to_role_policy(iampolicystatement_route53)

        # Escape hatch: reach into the L1 (CloudFormation) resources to pin
        # human-readable ASG / LaunchConfiguration names.
        cfn_bastion = bastion.node.default_child
        cfn_bastion.auto_scaling_group_name = "bastion"
        cfn_bastion_lc = bastion.node.find_child('LaunchConfig')
        cfn_bastion_lc.launch_configuration_name = "bastion"

        # Classic LoadBalancer
        bastion_lb = elb.LoadBalancer(
            self,
            "bastion-lb",
            vpc=vpc,
            internet_facing=True,
            health_check=elb.HealthCheck(
                port=22,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )

        cfn_bastion_lb = bastion_lb.node.default_child
        cfn_bastion_lb.load_balancer_name = "bastion"

        # SSH passthrough restricted to the operator's IPv4 address.
        bastion_lb.add_listener(
            external_port=22,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[ec2.Peer().ipv4(myipv4)]
        )
        bastion_lb.add_target(
            target=bastion
        )
        # UserData
        bastion.add_user_data(
            "sudo yum update",
            "sudo yum upgrade -y",
            "sudo yum install jq tmux -y",
            "wget https://gist.githubusercontent.com/dmytro/3984680/raw/1e25a9766b2f21d7a8e901492bbf9db672e0c871/ssh-multi.sh -O /home/ec2-user/tmux-multi.sh",
            "chmod +x /home/ec2-user/tmux-multi.sh",
            "wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 && chmod +x cfssl_linux-amd64 && sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl && sudo chown ec2-user:ec2-user /usr/local/bin/cfssl",
            "wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 && chmod +x cfssljson_linux-amd64 && sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson && sudo chown ec2-user:ec2-user /usr/local/bin/cfssljson",
            "curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl && sudo mv kubectl /usr/local/bin/kubectl && chown ec2-user:ec2-user /usr/local/bin/kubectl",
            "sudo hostname " + "bastion" + "." + zone_fqdn,
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment"
        )
        # Route53 Alias Target for LB
        route53_target = route53_targets.ClassicLoadBalancerTarget(bastion_lb)
        # Route53 Record for Bastion Host LB
        route53_bastion = route53.ARecord(
            self,
            "bastion-lb-route53",
            target=route53.RecordTarget.from_alias(route53_target),
            zone=zoneid,
            comment="Bastion Host LB",
            record_name='bastion'
        )

        # ETCD
        # AutoScalingGroup
        etcd = autoscaling.AutoScalingGroup(
            self,
            "etcd",
            vpc=vpc,
            min_capacity=etcd_min_capacity,
            max_capacity=etcd_max_capacity,
            desired_capacity=etcd_desired_capacity,
            instance_type=ec2.InstanceType(etcd_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        etcd.add_to_role_policy(iampolicystatement)

        cfn_etcd = etcd.node.default_child
        cfn_etcd.auto_scaling_group_name = "etcd"
        cfn_etcd_lc = etcd.node.find_child('LaunchConfig')
        cfn_etcd_lc.launch_configuration_name = "etcd"

        # UserData
        etcd.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # KUBERNETES MASTER Load Balancer
        # Public Load Balancer (for remote kubectl access)
        master_public_lb = elb.LoadBalancer(
            self,
            "k8s-real-hard-way-master-public-lb",
            vpc=vpc,
            internet_facing=True,
            health_check=elb.HealthCheck(
                port=6443,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )
        master_public_lb.add_listener(
            external_port=6443,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[ec2.Peer().ipv4(myipv4)]
        )

        cfn_master_public_lb = master_public_lb.node.default_child
        cfn_master_public_lb.load_balancer_name = "master-public"

        # Private Load Balancer (fronting kube-apiservers)
        master_private_lb = elb.LoadBalancer(
            self,
            "k8s-real-hard-way-master-private-lb",
            vpc=vpc,
            internet_facing=False,
            health_check=elb.HealthCheck(
                port=6443,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )
        # Empty allow-list: ingress is governed by the security-group rules
        # attached further below instead.
        master_private_lb.add_listener(
            external_port=6443,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[]
        )

        cfn_master_private_lb = master_private_lb.node.default_child
        cfn_master_private_lb.load_balancer_name = "master-private"

        # AutoScalingGroup
        master = autoscaling.AutoScalingGroup(
            self,
            "master",
            vpc=vpc,
            min_capacity=master_min_capacity,
            max_capacity=master_max_capacity,
            desired_capacity=master_desired_capacity,
            instance_type=ec2.InstanceType(master_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        master.add_to_role_policy(iampolicystatement)

        cfn_master = master.node.default_child
        cfn_master.auto_scaling_group_name = "master"
        cfn_master_lc = master.node.find_child('LaunchConfig')
        cfn_master_lc.launch_configuration_name = "master"

        # Add ASG as target for LBs
        master_public_lb.add_target(
            target=master
        )
        master_private_lb.add_target(
            target=master
        )
        # UserData
        master.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # KUBERNETES WORKER
        worker = autoscaling.AutoScalingGroup(
            self,
            "worker",
            vpc=vpc,
            min_capacity=worker_min_capacity,
            max_capacity=worker_max_capacity,
            desired_capacity=worker_desired_capacity,
            instance_type=ec2.InstanceType(worker_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        worker.add_to_role_policy(iampolicystatement)

        cfn_worker = worker.node.default_child
        cfn_worker.auto_scaling_group_name = "worker"
        cfn_worker_lc = worker.node.find_child('LaunchConfig')
        cfn_worker_lc.launch_configuration_name = "worker"

        # UserData
        # Each worker picks a random /24 out of pod_cidr for its pod network.
        worker.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "RANDOM_NUMBER=$(shuf -i 10-250 -n 1)",
            "echo \"POD_CIDR=" + pod_cidr + ".$RANDOM_NUMBER.0/24\" | sudo tee -a /etc/environment",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # SecurityGroups
        # Bastion LB
        bastion_lb_sg = ec2.SecurityGroup(
            self,
            "bastion-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="Bastion-LB",
        )
        # Kubernetes Master Public LB
        master_public_lb_sg = ec2.SecurityGroup(
            self,
            "k8s-real-hard-way-master-public-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s MasterPublicLB",
        )
        # Kubernetes Master Private LB
        master_private_lb_sg = ec2.SecurityGroup(
            self,
            "k8s-real-hard-way-master-private-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s MasterPrivateLB",
        )
        # Bastion
        bastion_security_group = ec2.SecurityGroup(
            self,
            "bastion-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="Bastion"
        )
        # etcd
        etcd_security_group = ec2.SecurityGroup(
            self,
            "etcd-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="etcd"
        )
        # Kubernetes Master
        master_securiy_group = ec2.SecurityGroup(
            self,
            "master-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s Master",
        )
        # Kubernetes Worker
        worker_security_group = ec2.SecurityGroup(
            self,
            "worker-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s Worker"
        )

        # SecurityGroup Rules
        # Bastion LB
        bastion_lb_sg.add_ingress_rule(
            peer=ec2.Peer().ipv4(myipv4),
            connection=ec2.Port.tcp(22),
            description="SSH: Workstation - MasterPublicLB"
        )
        # Master Public LB
        master_public_lb_sg.add_ingress_rule(
            peer=ec2.Peer().ipv4(myipv4),
            connection=ec2.Port.tcp(6443),
            description="kubectl: Workstation - MasterPublicLB"
        )
        master_public_lb_sg.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.tcp(6443),
            description="kubeapi: Workers - MasterPublicLB"
        )
        # Master Private LB
        # master_private_lb_sg.add_ingress_rule(
        #     peer=master_securiy_group,
        #     connection=ec2.Port.tcp(6443),
        #     description="kubectl: Masters - MasterPrivateLB"
        # )
        # master_private_lb_sg.add_ingress_rule(
        #     peer=worker_security_group,
        #     connection=ec2.Port.tcp(6443),
        #     description="kubeapi: Workers - MasterPrivateLB"
        # )
        # NOTE(review): the rule below opens 6443 on the private LB to any
        # IPv4 address, superseding the narrower commented-out rules above —
        # confirm this is intentional.
        master_private_lb_sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(6443),
            description="kubectl: ALL - MasterPrivateLB"
        )
        # Bastion Host
        bastion_security_group.add_ingress_rule(
            peer=bastion_lb_sg,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion-LB - Bastio"
        )
        # etcd
        etcd_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Etcds"
        )
        etcd_security_group.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.tcp_range(start_port=2379, end_port=2380),
            description="etcd: Masters - Etcds"
        )
        etcd_security_group.add_ingress_rule(
            peer=etcd_security_group,
            connection=ec2.Port.tcp_range(start_port=2379, end_port=2380),
            description="etcd: Etcds - Etcds"
        )
        # K8s-Master
        master_securiy_group.add_ingress_rule(
            peer=worker_security_group,
            connection=ec2.Port.all_traffic(),
            description="ALL: Workers - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Bastion - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=master_public_lb_sg,
            connection=ec2.Port.tcp(6443),
            description="kubectl: MasterPublicLB - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=master_private_lb_sg,
            connection=ec2.Port.tcp(6443),
            description="kubectl: MasterPrivateLB - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=worker_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Workers - Masters"
        )
        # K8s-Worker
        worker_security_group.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.all_traffic(),
            description="ALL: Master - Workers"
        )
        worker_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Workers"
        )
        worker_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Bastion - Workers"
        )

        # Add SecurityGroups to resources
        bastion.add_security_group(bastion_security_group)
        etcd.add_security_group(etcd_security_group)
        master.add_security_group(master_securiy_group)
        worker.add_security_group(worker_security_group)
        # LB security groups must be set on the L1 resource directly.
        cfn_master_public_lb.security_groups = [
            master_public_lb_sg.security_group_id
        ]
        cfn_master_private_lb.security_groups = [
            master_private_lb_sg.security_group_id
        ]

        # Add specific Tags to resources
        # NOTE(review): core.Tag.add is deprecated in later CDK releases in
        # favour of core.Tags.of(scope).add(...) — verify against the pinned
        # CDK version before upgrading.
        core.Tag.add(
            bastion,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-bastion'
        )
        core.Tag.add(
            bastion_lb,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-bastion-lb'
        )
        core.Tag.add(
            master_public_lb,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-master-lb'
        )
        core.Tag.add(
            etcd,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-etcd'
        )
        core.Tag.add(
            master,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-k8s-master'
        )
        core.Tag.add(
            worker,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-k8s-worker'
        )
        for subnet in vpc.private_subnets:
            core.Tag.add(
                subnet,
                key='Attribute',
                value='private'
            )
        for subnet in vpc.public_subnets:
            core.Tag.add(
                subnet,
                key='Attribute',
                value='public'
            )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Build the "CovidFree" demo stack: a public-subnet VPC, a security
        group open for HTTP and SSH, a launch configuration, an auto scaling
        group bootstrapped from userdata.sh, and a classic ELB fronting it.

        NOTE(review): generate_instances / get_instance_reference_ids are
        defined but never invoked here, and covid_free_lc is created but not
        attached to the ASG — presumably leftovers; verify before cleanup.
        """
        super().__init__(scope, id, **kwargs)

        # Read the instance bootstrap script from userdata.sh on disk.
        def get_userdata():
            with open('userdata.sh') as userdata:
                script = userdata.read()
            return script

        # First public subnet of the VPC (used for the ELB placement below).
        def get_public_subnet():
            public_subnet = covid_free_vpc.select_subnets(
                subnet_type=ec2_stacker.SubnetType.PUBLIC)
            return public_subnet.subnets[0]

        def get_web_sg():
            return covid_free_sg

        # Generate `count` standalone EC2 instances (currently unused).
        def generate_instances(count=4):
            amazon_linux_2 = ec2_stacker.GenericLinuxImage(
                {"us-east-1": "ami-0fc61db8544a617ed"})

            ec2_objects = []
            for i in range(count):
                ec2_instnace = ec2_stacker.Instance(
                    self,
                    f"Instance number {i}",
                    instance_type=ec2_stacker.InstanceType('t2.micro'),
                    machine_image=amazon_linux_2,
                    vpc=covid_free_vpc,
                    security_group=get_web_sg(),
                    user_data=ec2_stacker.UserData.custom(get_userdata()))
                ec2_objects.append(ec2_instnace)
            return ec2_objects

        # Utility: yield the CloudFormation instance ids of the instances
        # created by generate_instances (currently unused).
        def get_instance_reference_ids():
            data = generate_instances()
            for i in data:
                yield (i.instance_id)

        # VPC with DNS enabled and a single public subnet group across up
        # to three AZs.
        covid_free_vpc = ec2_stacker.Vpc(
            self,
            'CovidFreeVPC',
            enable_dns_support=True,
            enable_dns_hostnames=True,
            max_azs=3,
            subnet_configuration=[
                ec2_stacker.SubnetConfiguration(
                    subnet_type=ec2_stacker.SubnetType.PUBLIC,
                    name='InternetFacingSubnetGroup',
                    cidr_mask=24)
            ])
        #ec2_stacker.SubnetConfiguration(subnet_type=ec2_stacker.SubnetType.PRIVATE, name='PrivateSubnetGroup',  cidr_mask=24 )
        #])

        # Security group (requires the vpc param): HTTP and SSH open to all.
        covid_free_sg = ec2_stacker.SecurityGroup(self,
                                                  'CovidFreeSG',
                                                  allow_all_outbound=True,
                                                  vpc=covid_free_vpc)
        covid_free_sg.add_ingress_rule(peer=ec2_stacker.Peer.any_ipv4(),
                                       connection=ec2_stacker.Port.tcp(80),
                                       description="allow http traffic")
        covid_free_sg.add_ingress_rule(ec2_stacker.Peer.any_ipv4(),
                                       ec2_stacker.Port.tcp(22),
                                       description="allow ssh traffic")

        # Launch configuration (userdata is also a valid param).
        covid_free_lc = ats_stacker.CfnLaunchConfiguration(
            self,
            'CovidFreeLC',
            instance_type='t2.micro',
            image_id='ami-0fc61db8544a617ed')

        # Auto scaling group — better to add userdata via the ASG: it does
        # not throw a base64 error and plain strings can be used.
        instance_type = ec2_stacker.InstanceType.of(
            ec2_stacker.InstanceClass.BURSTABLE2,
            ec2_stacker.InstanceSize.MICRO)
        amazon_linux_2 = ec2_stacker.GenericLinuxImage(
            {"us-east-1": "ami-0fc61db8544a617ed"})
        covid_free_asg = ats_stacker.AutoScalingGroup(
            self,
            'CovidFreeASG',
            vpc=covid_free_vpc,
            associate_public_ip_address=True,
            key_name="CoronaVirusKP",
            instance_type=instance_type,
            machine_image=amazon_linux_2,
            min_capacity=5,
            max_capacity=10)
        covid_free_asg.add_user_data(get_userdata())

        # Register the ASG as the ELB target (targets must be a list).
        elb = elb_stacker.LoadBalancer(self,
                                       'CovidFreeELB',
                                       cross_zone=True,
                                       vpc=covid_free_vpc,
                                       health_check={"port": 80},
                                       internet_facing=True,
                                       subnet_selection=get_public_subnet(),
                                       targets=[covid_free_asg])
        elb.add_listener(external_port=80)