Exemplo n.º 1
0
 def _add_autoscaling_group(
   self,
   vpc: ec2.Vpc,
   public_subnet,
   security_group: ec2.SecurityGroup,
   key_name: str
 ) -> autoscaling.AutoScalingGroup:
   """
   Add autoscaling group for running ec2 instance automatically.

   :param vpc: VPC to place the autoscaling group in.
   :param public_subnet: subnet selection the instances launch into.
   :param security_group: additional security group attached to the group.
   :param key_name: EC2 key pair name used for SSH access.
   :return: the created AutoScalingGroup construct.
   """
   # Region-pinned AMI map; only ap-southeast-1 is supported here.
   Zach_Ec2_InstanceImage = ec2.GenericLinuxImage(ami_map={"ap-southeast-1":"ami-048a01c78f7bae4aa"})
   # Build the cloud-init user data once; it is passed to the group below.
   ZachInitInstall=ec2.UserData.for_linux()
   ZachInitInstall.add_commands("yum install -y psmisc nginx")
   group = autoscaling.AutoScalingGroup(
     self,
     'zach-autoscale',
     vpc=vpc,
     instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),
     machine_image=Zach_Ec2_InstanceImage,
     max_capacity=2,
     vpc_subnets=public_subnet,
     associate_public_ip_address=True,
     key_name=key_name,
     # BUG FIX: the original passed ec2.UserData.add_commands(self, ...) —
     # an instance method called on the class with the stack as receiver,
     # which fails at synth time and discarded the prepared ZachInitInstall.
     # Pass the UserData object itself instead.
     user_data=ZachInitInstall
   )
   group.add_security_group(security_group)
   core.CfnOutput(self, "ASG-GROUP-ARN", export_name="ASG-GROUP-ARN", value=group.auto_scaling_group_arn)
   core.CfnOutput(self, "ASG-GROUP", export_name="ASG-GROUP", value=group.auto_scaling_group_name)
   return group
Exemplo n.º 2
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, config: dict, region: str, **kwargs):
        """Provision the Jenkins EC2 server plus its instance role and security group."""
        super().__init__(scope, id, **kwargs)

        self._region = region

        ### EC2 Server for Jenkins
        # AMI is resolved from the per-region id supplied in the config dict.
        jenkins_ami = ec2.GenericLinuxImage({region: config["ami_id"]})

        # EC2-assumable role with every managed policy named in the config.
        self._role = iam.Role(
            self, "InstanceRole",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        for policy_name in config["iam_role_policies"]:
            self._role.add_managed_policy(
                iam.ManagedPolicy.from_aws_managed_policy_name(policy_name))

        # Pin the server to the first private subnet of the VPC.
        first_private = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE).subnets[0]
        placement = ec2.SubnetSelection(subnets=[first_private])

        self.security_group = ec2.SecurityGroup(self, "EC2SG", vpc=vpc)

        self._instance = ec2.Instance(
            self, "EC2",
            instance_type=ec2.InstanceType(config["instance_type"]),
            machine_image=jenkins_ami,
            vpc=vpc,
            vpc_subnets=placement,
            role=self._role,
            security_group=self.security_group)

        core.CfnOutput(self, "CodeServerInstanceID", value=self._instance.instance_id)
    def __create_asg(self, user_data: str, vpc: ec2.Vpc):
        """Create the Imagizer autoscaling group in *vpc* with the given user data."""
        # One public subnet per AZ keeps the fleet spread across zones.
        subnet_choice = ec2.SubnetSelection(one_per_az=True,
                                            subnet_type=ec2.SubnetType.PUBLIC)

        # Rolling update keeps a minimum batch in service and waits for
        # cfn-signal before continuing the roll-out.
        rollout = autoscaling.RollingUpdateConfiguration(
            min_instances_in_service=variables.ASG_ROLL_OUT_BATCH_SIZE,
            max_batch_size=variables.ASG_ROLL_OUT_BATCH_SIZE,
            wait_on_resource_signals=True,
            pause_time=core.Duration.minutes(
                variables.ASG_ROLL_OUT_PATCH_MINUTES))

        asg = autoscaling.AutoScalingGroup(
            self,
            id=common.generate_id("ImagizerAutoscalingGroup"),
            vpc=vpc,
            vpc_subnets=subnet_choice,
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass[variables.ASG_INSTANCE_TYPE],
                instance_size=ec2.InstanceSize[variables.ASG_INSTANCE_SIZE]),
            machine_image=ec2.GenericLinuxImage(
                ami_map={self.region: variables.IMAGIZER_AMI_ID}),
            user_data=ec2.UserData.custom(user_data),
            update_type=autoscaling.UpdateType.ROLLING_UPDATE,
            health_check=autoscaling.HealthCheck.ec2(
                grace=core.Duration.seconds(
                    variables.ASG_HEALTH_CHECK_GRACE_PERIOD)),
            # NOTE: cooldown reuses the health-check grace period constant.
            cooldown=core.Duration.seconds(
                variables.ASG_HEALTH_CHECK_GRACE_PERIOD),
            min_capacity=variables.ASG_MIN_CAPACITY,
            max_capacity=variables.ASG_MAX_CAPACITY,
            rolling_update_configuration=rollout)

        common.add_tags(self, asg, variables.IMAGIZER_CLUSTER_TAGS)
        return asg
Exemplo n.º 4
0
    def __init__(self, scope: core.Construct, id: str, vpc, config,
                 **kwargs) -> None:
        """Create the bastion EC2 instance and open SSH/ICMP ingress to it."""
        super().__init__(scope, id, **kwargs)
        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        name = config['ec2']['name']
        key = config['ec2']['ssh_key']

        # Hard-coded Ubuntu AMI for ap-southeast-1 only.
        ubuntu_ami = ec2.GenericLinuxImage(
            {"ap-southeast-1": "ami-028be27cf930f7a43"})

        # Create bastion host (placed in a PRIVATE subnet).
        self.bastion = ec2.Instance(
            self,
            'Instance',
            instance_type=ec2.InstanceType("t3.small"),
            instance_name=f"{name}-bastion",
            key_name=f"{key}",
            machine_image=ubuntu_ami,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE))
        self.bastion.apply_removal_policy(core.RemovalPolicy.DESTROY)

        # World-open SSH and ping on the instance's connections object.
        ingress_rules = (
            (ec2.Port.tcp(22), 'Allow public SSH connections'),
            (ec2.Port.icmp_ping(), 'Allow public ICMP ping'),
        )
        for port, reason in ingress_rules:
            self.bastion.connections.allow_from_any_ipv4(
                port_range=port, description=reason)

        core.CfnOutput(self,
                       f'{name}-private-ip',
                       value=self.bastion.instance_private_ip)
Exemplo n.º 5
0
    def __init__(self, scope: core.Construct, id: str, vpc, bastion, config,
                 **kwargs) -> None:
        """Create the webapp security group, launch template and autoscaling group.

        SSH is allowed only from the bastion's private IP; HTTP (80) is allowed
        from each public subnet CIDR (where the ELB lives).
        """
        super().__init__(scope, id, **kwargs)
        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        name = config['ec2']['name']
        key = config['ec2']['ssh_key']

        # Hard-coded Ubuntu AMI for ap-southeast-1 only.
        ubuntu_ami = ec2.GenericLinuxImage(
            {"ap-southeast-1": "ami-028be27cf930f7a43"})

        # Create security group for webapp instances
        webapp_sg = ec2.SecurityGroup(self,
                                      'webapp-sg',
                                      vpc=vpc,
                                      security_group_name=prj_name + env_name +
                                      '-webapp-sg',
                                      description="SG for webapp Instances",
                                      allow_all_outbound=True)

        # SSH only from the bastion's /32 address.
        webapp_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4(f"{bastion.instance_private_ip}/32"),
            connection=ec2.Port.tcp(22),
            description='Allow all bastion instance to SSH')

        # HTTP from every public subnet CIDR (ELB-facing traffic).
        for subnet in vpc.public_subnets:
            webapp_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(80),
                description=
                'Allow ELB public subnets to access webapp instances')

        # Create launch template and attach to autoscaling group
        # NOTE(review): despite the comment above, this launch template is
        # never referenced again in this method — the ASG below is configured
        # directly with instance_type/machine_image, so the template (and with
        # it webapp_sg and the SSH key) is unused. Confirm whether it should be
        # wired into the ASG.
        webapp_launch_template = ec2.LaunchTemplate(
            self,
            'launch-template',
            detailed_monitoring=False,
            ebs_optimized=False,
            instance_type=ec2.InstanceType("t3.small"),
            launch_template_name=f"{name}-launch-template",
            key_name=f"{key}",
            machine_image=ubuntu_ami,
            security_group=webapp_sg)

        # Fixed-size (1 instance) group in the private subnets.
        self.webapp_asg = autoscaling.AutoScalingGroup(
            self,
            'webapp-asg',
            vpc=vpc,
            auto_scaling_group_name=prj_name + env_name + '-webapp-asg',
            instance_type=ec2.InstanceType("t3.small"),
            machine_image=ubuntu_ami,
            min_capacity=1,
            max_capacity=1,
            desired_capacity=1,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE))
Exemplo n.º 6
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Create a VPC, an SSH-open security group and a benchmark EC2 instance.

        The instance runs a short user-data script and is replaced whenever the
        user data changes (user_data_causes_replacement=True).
        """
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Create a new VPC (note: ec2.Vpc(...) creates one, it does not look
        # up an existing VPC).
        vpc = ec2.Vpc(
            self,
            "yardstiqVpc",
        )

        # create a new security group
        sec_group = ec2.SecurityGroup(
            self,
            "sec-group-allow-ssh",
            vpc=vpc,
            allow_all_outbound=True,
        )

        # add a new ingress rule to allow port 22 to internal hosts
        # NOTE(review): Peer.any_ipv4() opens SSH to the whole internet; the
        # commented-out '10.0.0.0/16' suggests internal-only was the intent.
        sec_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),  #('10.0.0.0/16'),
            description="Allow SSH connection",
            connection=ec2.Port.tcp(22))

        # Define commands to run on startup
        user_data = ec2.UserData.for_linux()

        #        command = """
        #        git clone https://github.com/Roger-luo/quantum-benchmarks > /home/ubuntu/yardstiq.log \
        #                && cd quantum-benchmarks >> /home/ubuntu/yardstiq.log \
        #                && bin/benchmark setup >> /home/ubuntu/yardstiq.log  \
        #                && bin/benchmark benchmark >> /home/ubuntu/yardstiq.log
        #        """

        # Placeholder workload: log a message, then power off after a minute.
        command = """
        echo "Hello World" >> /home/ubuntu/yardstiq.log
        echo "sudo halt" | at now + 1 minutes
        """

        user_data.add_commands(command)

        # define a new ec2 instance
        # NOTE(review): instanceName and instanceType are not defined in this
        # method — they must exist at module level; confirm before reuse.
        ec2_instance = ec2.Instance(
            self,
            "ec2-instance",
            key_name="yardstiqPem",
            instance_name=instanceName,
            instance_type=ec2.InstanceType(instanceType),
            machine_image=ec2.GenericLinuxImage(
                {'us-west-1': 'ami-031b673f443c2172c'}),
            vpc=vpc,
            security_group=sec_group,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            user_data=user_data,
            user_data_causes_replacement=True)
        # NOTE(review): this nested helper references names from an unrelated
        # snippet (ec2_stacker, covid_free_vpc, get_web_sg, get_userdata) that
        # are not in scope here — it will raise NameError if called; verify
        # against the original source.
        def generate_instances(count=4):
            # Amazon Linux 2 AMI, us-east-1 only.
            amazon_linux_2 = ec2_stacker.GenericLinuxImage(
                {"us-east-1": "ami-0fc61db8544a617ed"})

            ec2_objects = []
            for i in range(count):
                ec2_instnace = ec2_stacker.Instance(
                    self,
                    f"Instance number {i}",
                    instance_type=ec2_stacker.InstanceType('t2.micro'),
                    machine_image=amazon_linux_2,
                    vpc=covid_free_vpc,
                    security_group=get_web_sg(),
                    user_data=ec2_stacker.UserData.custom(get_userdata()))
                ec2_objects.append(ec2_instnace)
            return ec2_objects
Exemplo n.º 8
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Launch a single burstable EC2 instance in the account's default VPC."""
        super().__init__(scope, id, **kwargs)

        # to deploy in our default VPC (requires env set on the stack for lookups)
        vpc = aws_ec2.Vpc.from_lookup(self, 'VPC', is_default=True)

        # BUG FIX: the original map used instance ids ('i-1111111') where AMI
        # ids are required — GenericLinuxImage values must be 'ami-...' ids.
        linux = aws_ec2.GenericLinuxImage({
            'us-west-2': 'ami-1111111',
            'ap-northeast-1': 'ami-222222'
        })

        # BUG FIX: a stray trailing comma previously wrapped this call in a
        # one-element tuple; it is a plain statement now.
        aws_ec2.Instance(self,
                         'EC2',
                         instance_type=aws_ec2.InstanceType.of(
                             aws_ec2.InstanceClass.BURSTABLE3,
                             aws_ec2.InstanceSize.LARGE),
                         machine_image=linux,
                         vpc=vpc)
Exemplo n.º 9
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 config: dict, region: str, **kwargs):
        """Jenkins EC2 server with SSM access plus a webhook-forwarding Lambda."""
        super().__init__(scope, id, **kwargs)

        ### EC2 Server for Jenkins
        # AMI id comes from config, keyed by the deployment region.
        server_image = ec2.GenericLinuxImage({region: config["ami_id"]})

        # EC2-assumable role limited to the SSM service-role policy.
        ssm_role = iam.Role(self,
                            "InstanceSSM",
                            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        ssm_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Query the private subnets once; the server uses the first, the
        # Lambda attaches to all of them.
        private_subnets = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnets
        placement = ec2.SubnetSelection(subnets=[private_subnets[0]])

        self.security_group = ec2.SecurityGroup(self, "EC2SG", vpc=vpc)

        self._instance = ec2.Instance(
            self, "EC2",
            instance_type=ec2.InstanceType(config["instance_type"]),
            machine_image=server_image,
            vpc=vpc,
            vpc_subnets=placement,
            role=ssm_role,
            security_group=self.security_group)

        ### Lambda for github webhooks
        self._webhook_forwarder = _lambda.Function(
            self,
            "WebHookForwarder",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(dirname, "lambda", "webhook_forwarder")),
            handler="lambda_function.lambda_handler",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=private_subnets))
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Launch a t2.micro from a VM-Import AMI inside a fixed VPC."""
        super().__init__(scope, id, **kwargs)

        def get_userdata():
            # Bootstrap script shipped alongside the app.
            with open('bootstrap.sh', 'r') as userdata:
                return userdata.read()

        # Hard-coded account/network identifiers for this deployment.
        vmie_ami = "ami-00bf35d2ab0bdb452"
        default_vpc = "vpc-e94d1f93"
        ec2_role = "arn:aws:iam::88888888888:role/KratosRole"
        account_id = "8888888888"

        vm_import_image = aws_ec2.GenericLinuxImage({"us-east-1": vmie_ami})
        # NOTE(review): the Environment object is built and discarded here —
        # confirm whether it was meant to be attached to the stack.
        core.Environment(account=account_id)
        kratos_role = aws_iam.Role.from_role_arn(self, 'KratosXL', role_arn=ec2_role)

        instance_sg = aws_ec2.CfnSecurityGroup(
            self, id=f"SG-{vmie_ami}",
            group_description=f"SG-CDK-{vmie_ami}")
        target_vpc = aws_ec2.Vpc.from_lookup(
            self, f'CDK-VPC--{vmie_ami}', vpc_id=default_vpc)

        aws_ec2.Instance(
            self, f"VMIE-{vmie_ami}",
            instance_type=aws_ec2.InstanceType('t2.micro'),
            role=kratos_role,
            machine_image=vm_import_image,
            security_group=instance_sg,
            vpc=target_vpc,
            user_data=aws_ec2.UserData.custom(get_userdata()),
            key_name="covidQuarantine")
Exemplo n.º 11
0
    def _create_instance(self, num):
        """Launch *num* EC2 instances with init user data, SGs, tags and outputs.

        :param num: number of instances to create (ids are zero-padded ordinals).
        """
        # BUG FIX: the init script was read via open(...).read() without ever
        # closing the file handle; use a context manager instead.
        with open("aws_cdk_python/Zach_EC2_Stack/init.sh", "rb") as init_file:
            InitData = init_file.read()
        ZachInitInstall = ec2.UserData.for_linux()
        ZachInitInstall.add_commands(str(InitData, encoding='UTF-8'))
        Zach_Ec2_InstanceImage = ec2.GenericLinuxImage(ami_map={"ap-southeast-1": "ami-048a01c78f7bae4aa"})
        Zach_Ec2_Instance_Key = "aws-sg-root"

        try:
            TypeClass = ec2.InstanceClass.BURSTABLE2
            TypeSize = ec2.InstanceSize.MICRO
            Zach_Ec2_InstanceType = ec2.InstanceType.of(TypeClass, TypeSize)
        except Exception:
            # Fall back to a literal identifier if the enum lookup fails.
            Zach_Ec2_InstanceType = ec2.InstanceType(instance_type_identifier="t2.micro")

        vpc = ec2.Vpc.from_lookup(self, id="Zach_VPC_Stack_A", vpc_id="vpc-01e73b4b5c6f9f98a")
        for ids in range(num):
            # Construct id: <ClassName>-00000, -00001, ...
            ZachSequence = self.__class__.__name__ + "-" + str(ids).zfill(5)
            Zach_Ec2_Instance = ec2.Instance(self, id=ZachSequence, instance_name=ZachSequence,
                                             instance_type=Zach_Ec2_InstanceType,
                                             machine_image=Zach_Ec2_InstanceImage,
                                             vpc=vpc,
                                             vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                             key_name=Zach_Ec2_Instance_Key,
                                             user_data=ZachInitInstall)
            # Attach pre-existing SSH and web security groups by id.
            Zach_Ec2_Instance.add_security_group(ec2.SecurityGroup.from_security_group_id(self, id=ZachSequence + "SSH",
                                                                                          security_group_id="sg-02fb046cddb80deca"))
            Zach_Ec2_Instance.add_security_group(ec2.SecurityGroup.from_security_group_id(self, id=ZachSequence + "WEB",
                                                                                          security_group_id="sg-07a1d71bba92f38ca"))
            # BUG FIX: core.Tag.add() returns None, so collecting its results
            # in a list was dead code; apply the tags directly.
            core.Tag.add(self, "OStype", "Linux")
            core.Tag.add(self, "K8S", "False")
            core.Tag.add(self, "Publish", "True")

            core.CfnOutput(self, "InstanceID-" + str(ids), value=Zach_Ec2_Instance.instance_id)
            core.CfnOutput(self, "InstancePubIP-" + str(ids), value=Zach_Ec2_Instance.instance_public_ip)
            core.CfnOutput(self, "InstancePrivIP-" + str(ids), value=Zach_Ec2_Instance.instance_private_ip)
            core.CfnOutput(self, "InstanceOS-" + str(ids), value=Zach_Ec2_Instance.os_type.value)
            core.CfnOutput(self, "InstanceRole-" + str(ids), value=Zach_Ec2_Instance.role.role_arn)
            core.CfnOutput(self, "InstanceBindSG-" + str(ids), value=str(Zach_Ec2_Instance.connections.security_groups))
            core.CfnOutput(self, "InstanceLogicID-" + str(ids), value=Zach_Ec2_Instance.instance.logical_id)
Exemplo n.º 12
0
 def generate_instances(count=1):
     """Create *count* t2.micro instances from the Amazon Linux 2 AMI."""
     al2_image = aws_ec2.GenericLinuxImage(
         {"us-east-1": "ami-0fc61db8544a617ed"})
     instances = []
     for idx in range(count):
         ordinal = idx + int(1)  # ids are 1-based; descriptions stay 0-based
         instance = aws_ec2.Instance(
             self,
             f"CDK-Instance-{ordinal}",
             instance_type=aws_ec2.InstanceType('t2.micro'),
             role=kratos_role,
             machine_image=al2_image,
             security_group=aws_ec2.CfnSecurityGroup(
                 self,
                 id=f"SG{ordinal}",
                 group_description=f"SG-CDK-{idx}"),
             vpc=aws_ec2.Vpc.from_lookup(self,
                                         f'CDK-VPC-{ordinal}',
                                         vpc_id="vpc-eeeee3"),
             user_data=aws_ec2.UserData.custom(get_userdata()),
             key_name="covidQuarantine")
         instances.append(instance)
     return instances
Exemplo n.º 13
0
    def _add_autoscaling_group(self, vpc: ec2.Vpc,
                               public_subnet: ec2.SubnetConfiguration,
                               security_group: ec2.SecurityGroup,
                               role: iam.Role) -> autoscaling.AutoScalingGroup:
        """Add an autoscaling group that runs EC2 instances automatically.

        :param vpc: VPC the group is placed in.
        :param public_subnet: subnet selection for the instances.
        :param security_group: extra security group attached to the group.
        :param role: IAM role assumed by the instances.
        :return: the created AutoScalingGroup construct.
        """
        # Single-region AMI map for ap-southeast-1.
        image = ec2.GenericLinuxImage(
            ami_map={"ap-southeast-1": "ami-048a01c78f7bae4aa"})
        micro_burstable = ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO)

        group = autoscaling.AutoScalingGroup(
            self,
            'zach-autoscale',
            vpc=vpc,
            instance_type=micro_burstable,
            machine_image=image,
            max_capacity=1,
            vpc_subnets=public_subnet,
            associate_public_ip_address=True,
            key_name='aws-sg-root',
            role=role)
        group.add_security_group(security_group)
        return group
Exemplo n.º 14
0
from aws_cdk import core
import aws_cdk.aws_ec2 as ec2

# --- Stack-level configuration (evaluated at module import time) ---
vpc_id = "vpc-ebe75a8c"  # Import an existing VPC by id
ec2_type = "t3a.micro"  # instance type for the single host
key_name = "stg-intrinio-www01"  # SSH key pair name
# AMI map keyed by region; only us-east-1 is provided here.
linux_ami = ec2.GenericLinuxImage({
    "us-east-1": "ami-0323c3dd2da7fb37d"
})
# User-data script is read once when this module is imported.
with open("./user_data/user_data.sh") as f:
    user_data = f.read()


class CdkVpcEc2Stack(core.Stack):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

        host = ec2.Instance(self, "myEC2",
                            instance_type=ec2.InstanceType(
                                instance_type_identifier=ec2_type),
                            instance_name="mySingleHost",
                            machine_image=linux_ami,
                            vpc=vpc,
                            key_name=key_name,
                            vpc_subnets=ec2.SubnetSelection(
                                subnet_type=ec2.SubnetType.PUBLIC),
                            # user_data=ec2.UserData.custom(user_data)
Exemplo n.º 15
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Create a VPC, a MySQL RDS instance, an ASG whose user data holds the
        DB credentials, and an internet-facing ALB targeting the ASG on 8080.
        """
        super().__init__(scope, id, **kwargs)

        # Some error prevented me from creating the subnets properly
        #  Will investigate soon-ish
        vpc = ec2.Vpc(
            self,
            "VPC",
        )
        # Reuse the VPC's default security group for the database.
        sg_default = ec2.SecurityGroup.from_security_group_id(
            self, "SG", vpc.vpc_default_security_group)

        # Ensure:
        # 1) password is derived from SM ✓
        # 2) placement is in private subnets ✓
        # 3) Slave in other subnet -

        # Defaults:
        #  - master password stored in SM
        #  - default placement in private subnets
        # NOTE(review): master_username "******" looks like a redacted
        # placeholder from the original source — confirm the real value.
        db = rds.DatabaseInstance(self,
                                  "RDS",
                                  master_username="******",
                                  database_name="db1",
                                  engine_version="8.0.16",
                                  engine=rds.DatabaseInstanceEngine.MYSQL,
                                  vpc=vpc,
                                  port=3306,
                                  instance_class=ec2.InstanceType.of(
                                      ec2.InstanceClass.MEMORY4,
                                      ec2.InstanceSize.LARGE,
                                  ),
                                  security_groups=[sg_default],
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  deletion_protection=False)

        # NOTE(review): resolving the secret with to_string() embeds the DB
        # password in plaintext in the instances' user data (visible in the
        # rendered template and instance metadata) — consider fetching it at
        # boot from Secrets Manager instead.
        default_userdata = ec2.UserData.for_linux()
        db_password = db.secret.secret_value.to_string()
        db_host = db.instance_endpoint.hostname
        default_userdata.add_commands(
            "echo \"username=admin\npassword={}\nhost={}\" > /home/ubuntu/.mysql.creds"
            .format(db_password, db_host))

        # Two public t2.micro instances carrying the credentials file above.
        asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.GenericLinuxImage(
                {"eu-west-1": "ami-02df9ea15c1778c9c"}),
            desired_capacity=2,
            # Probably ensure we can access the machines...
            associate_public_ip_address=True,
            key_name="nicolai-test",
            user_data=default_userdata,
        )
        # asg.add_security_group(ssh_from_everywhere)

        # Internet-facing ALB forwarding HTTP to the ASG on port 8080.
        alb = elb2.ApplicationLoadBalancer(
            self,
            "ALB",
            vpc=vpc,
            internet_facing=True,
        )
        listener = alb.add_listener(
            "ALBListener",
            protocol=elb2.ApplicationProtocol.HTTP,
        )
        listener.add_targets("ASGTarget", targets=[asg], port=8080)
Exemplo n.º 16
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, tags=default_tags, **kwargs)

        # VPC
        vpc = ec2.Vpc(
            self,
            'k8s-real-hard-way-vpc',
            cidr=vpc_cidr,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    cidr_mask=24,
                    name='Public',
                    subnet_type=ec2.SubnetType.PUBLIC,

                ),
                ec2.SubnetConfiguration(
                    cidr_mask=24,
                    name='Private',
                    subnet_type=ec2.SubnetType.PRIVATE
                )
            ]
        )

        # Ubuntu AMI from dict mapping
        ubuntu_ami = ec2.GenericLinuxImage(
            ami_map={
                aws_region: ami_region_map.get(aws_region)
            }
        )

        # Get HostedZone ID from HostedZone Name
        zoneid = route53.HostedZone.from_lookup(
            self,
            "k8s-real-hard-way-zone",
            domain_name=zone_fqdn
        )
        zoneid_str = zoneid.hosted_zone_id

        # IAM Policy for Bastion Instance Profile
        iampolicystatement = iam.PolicyStatement(
            actions=[
                "ec2:CreateRoute",
                "ec2:CreateTags",
                "ec2:DescribeAutoScalingGroups",
                "autoscaling:DescribeAutoScalingInstances",
                "ec2:DescribeRegions",
                "ec2:DescribeRouteTables",
                "ec2:DescribeInstances",
                "ec2:DescribeTags",
                "elasticloadbalancing:DescribeLoadBalancers",
                "route53:ListHostedZonesByName"
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                "*"
            ]
        )
        iampolicystatement_route53 = iam.PolicyStatement(
            actions=[
                "route53:ChangeResourceRecordSets"
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:route53:::" + zoneid_str[1:]
            ]
        )
        # BASTION HOST
        # AutoScalingGroup
        bastion = autoscaling.AutoScalingGroup(
            self,
            "bastion",
            vpc=vpc,
            min_capacity=bastion_min_capacity,
            max_capacity=bastion_max_capacity,
            desired_capacity=bastion_desired_capacity,
            instance_type=ec2.InstanceType(bastion_instance_type),
            machine_image=ec2.AmazonLinuxImage(),
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        bastion.add_to_role_policy(iampolicystatement)
        bastion.add_to_role_policy(iampolicystatement_route53)

        cfn_bastion = bastion.node.default_child
        cfn_bastion.auto_scaling_group_name = "bastion"
        cfn_bastion_lc = bastion.node.find_child('LaunchConfig')
        cfn_bastion_lc.launch_configuration_name = "bastion"

        # Classic LoadBalancer
        bastion_lb = elb.LoadBalancer(
            self,
            "bastion-lb",
            vpc=vpc,
            internet_facing=True,
            health_check=elb.HealthCheck(
                port=22,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )

        cfn_bastion_lb = bastion_lb.node.default_child
        cfn_bastion_lb.load_balancer_name = "bastion"

        bastion_lb.add_listener(
            external_port=22,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[ec2.Peer().ipv4(myipv4)]
        )
        bastion_lb.add_target(
            target=bastion
        )
        # UserData
        bastion.add_user_data(
            "sudo yum update",
            "sudo yum upgrade -y",
            "sudo yum install jq tmux -y",
            "wget https://gist.githubusercontent.com/dmytro/3984680/raw/1e25a9766b2f21d7a8e901492bbf9db672e0c871/ssh-multi.sh -O /home/ec2-user/tmux-multi.sh",
            "chmod +x /home/ec2-user/tmux-multi.sh",
            "wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 && chmod +x cfssl_linux-amd64 && sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl && sudo chown ec2-user:ec2-user /usr/local/bin/cfssl",
            "wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 && chmod +x cfssljson_linux-amd64 && sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson && sudo chown ec2-user:ec2-user /usr/local/bin/cfssljson",
            "curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl && sudo mv kubectl /usr/local/bin/kubectl && chown ec2-user:ec2-user /usr/local/bin/kubectl",
            "sudo hostname " + "bastion" + "." + zone_fqdn,
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment"
        )
        # Route53 Alias Target for LB
        route53_target = route53_targets.ClassicLoadBalancerTarget(bastion_lb)
        # Route53 Record for Bastion Host LB
        route53_bastion = route53.ARecord(
            self,
            "bastion-lb-route53",
            target=route53.RecordTarget.from_alias(route53_target),
            zone=zoneid,
            comment="Bastion Host LB",
            record_name='bastion'
        )

        # ETCD
        # AutoScalingGroup
        etcd = autoscaling.AutoScalingGroup(
            self,
            "etcd",
            vpc=vpc,
            min_capacity=etcd_min_capacity,
            max_capacity=etcd_max_capacity,
            desired_capacity=etcd_desired_capacity,
            instance_type=ec2.InstanceType(etcd_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        etcd.add_to_role_policy(iampolicystatement)

        cfn_etcd = etcd.node.default_child
        cfn_etcd.auto_scaling_group_name = "etcd"
        cfn_etcd_lc = etcd.node.find_child('LaunchConfig')
        cfn_etcd_lc.launch_configuration_name = "etcd"

        # UserData
        etcd.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # KUBERNETES MASTER Load Balancer
        # Public Load Balancer (for remote kubectl access)
        master_public_lb = elb.LoadBalancer(
            self,
            "k8s-real-hard-way-master-public-lb",
            vpc=vpc,
            internet_facing=True,
            health_check=elb.HealthCheck(
                port=6443,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )
        master_public_lb.add_listener(
            external_port=6443,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[ec2.Peer().ipv4(myipv4)]
        )

        cfn_master_public_lb = master_public_lb.node.default_child
        cfn_master_public_lb.load_balancer_name = "master-public"

        # Private Load Balancer (fronting kube-apiservers)
        master_private_lb = elb.LoadBalancer(
            self,
            "k8s-real-hard-way-master-private-lb",
            vpc=vpc,
            internet_facing=False,
            health_check=elb.HealthCheck(
                port=6443,
                protocol=elb.LoadBalancingProtocol.TCP
            )
        )
        master_private_lb.add_listener(
            external_port=6443,
            external_protocol=elb.LoadBalancingProtocol.TCP,
            allow_connections_from=[]
        )

        cfn_master_private_lb = master_private_lb.node.default_child
        cfn_master_private_lb.load_balancer_name = "master-private"

        # AutoScalingGroup
        master = autoscaling.AutoScalingGroup(
            self,
            "master",
            vpc=vpc,
            min_capacity=master_min_capacity,
            max_capacity=master_max_capacity,
            desired_capacity=master_desired_capacity,
            instance_type=ec2.InstanceType(master_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        master.add_to_role_policy(iampolicystatement)

        cfn_master = master.node.default_child
        cfn_master.auto_scaling_group_name = "master"
        cfn_master_lc = master.node.find_child('LaunchConfig')
        cfn_master_lc.launch_configuration_name = "master"

        # Add ASG as target for LBs
        master_public_lb.add_target(
            target=master
        )
        master_private_lb.add_target(
            target=master
        )
        # UserData
        master.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # KUBERNETES WORKER
        worker = autoscaling.AutoScalingGroup(
            self,
            "worker",
            vpc=vpc,
            min_capacity=worker_min_capacity,
            max_capacity=worker_max_capacity,
            desired_capacity=worker_desired_capacity,
            instance_type=ec2.InstanceType(worker_instance_type),
            machine_image=ubuntu_ami,
            key_name=ssh_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_name='Private'
            ),
            associate_public_ip_address=False
        )
        worker.add_to_role_policy(iampolicystatement)

        cfn_worker = worker.node.default_child
        cfn_worker.auto_scaling_group_name = "worker"
        cfn_worker_lc = worker.node.find_child('LaunchConfig')
        cfn_worker_lc.launch_configuration_name = "worker"

        # UserData
        worker.add_user_data(
            "sudo apt-get update",
            "sudo apt-get upgrade -y",
            "sudo apt-get install python3-pip -y",
            "sudo pip3 install awscli",
            "RANDOM_NUMBER=$(shuf -i 10-250 -n 1)",
            "echo \"POD_CIDR=" + pod_cidr + ".$RANDOM_NUMBER.0/24\" | sudo tee -a /etc/environment",
            "echo \"AWS_DEFAULT_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\\\" '{print $4}')\" | sudo tee -a /etc/environment",
            "echo \"HOSTEDZONE_NAME=" + zone_fqdn + "\" | sudo tee -a /etc/environment",
            "echo \"INTERNAL_IP=$(curl -s http://169.254.169.254/1.0/meta-data/local-ipv4)\" | sudo tee -a /etc/environment"
        )

        # SecurityGroups
        # Bastion LB
        bastion_lb_sg = ec2.SecurityGroup(
            self,
            "bastion-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="Bastion-LB",
        )
        # Kubernetes Master Public LB
        master_public_lb_sg = ec2.SecurityGroup(
            self,
            "k8s-real-hard-way-master-public-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s MasterPublicLB",
        )
        # Kubernetes Master Private LB
        master_private_lb_sg = ec2.SecurityGroup(
            self,
            "k8s-real-hard-way-master-private-lb-sg",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s MasterPrivateLB",
        )
        # Bastion
        bastion_security_group = ec2.SecurityGroup(
            self,
            "bastion-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="Bastion"
        )
        # etcd
        etcd_security_group = ec2.SecurityGroup(
            self,
            "etcd-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="etcd"
        )
        # Kubernetes Master
        master_securiy_group = ec2.SecurityGroup(
            self,
            "master-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s Master",
        )
        # Kubernetes Worker
        worker_security_group = ec2.SecurityGroup(
            self,
            "worker-security-group",
            vpc=vpc,
            allow_all_outbound=True,
            description="K8s Worker"
        )

        # SecurityGroup Rules
        # Bastion LB
        bastion_lb_sg.add_ingress_rule(
            peer=ec2.Peer().ipv4(myipv4),
            connection=ec2.Port.tcp(22),
            description="SSH: Workstation - MasterPublicLB"
        )
        # Master Public LB
        master_public_lb_sg.add_ingress_rule(
            peer=ec2.Peer().ipv4(myipv4),
            connection=ec2.Port.tcp(6443),
            description="kubectl: Workstation - MasterPublicLB"
        )
        master_public_lb_sg.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.tcp(6443),
            description="kubeapi: Workers - MasterPublicLB"
        )
        # Master Private LB
        # master_private_lb_sg.add_ingress_rule(
        #     peer=master_securiy_group,
        #     connection=ec2.Port.tcp(6443),
        #     description="kubectl: Masters - MasterPrivateLB"
        # )
        # master_private_lb_sg.add_ingress_rule(
        #     peer=worker_security_group,
        #     connection=ec2.Port.tcp(6443),
        #     description="kubeapi: Workers - MasterPrivateLB"
        # )
        master_private_lb_sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(6443),
            description="kubectl: ALL - MasterPrivateLB"
        )
        # Bastion Host
        bastion_security_group.add_ingress_rule(
            peer=bastion_lb_sg,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion-LB - Bastio"
        )
        # etcd
        etcd_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Etcds"
        )
        etcd_security_group.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.tcp_range(start_port=2379, end_port=2380),
            description="etcd: Masters - Etcds"
        )
        etcd_security_group.add_ingress_rule(
            peer=etcd_security_group,
            connection=ec2.Port.tcp_range(start_port=2379, end_port=2380),
            description="etcd: Etcds - Etcds"
        )
        # K8s-Master
        master_securiy_group.add_ingress_rule(
            peer=worker_security_group,
            connection=ec2.Port.all_traffic(),
            description="ALL: Workers - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Bastion - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=master_public_lb_sg,
            connection=ec2.Port.tcp(6443),
            description="kubectl: MasterPublicLB - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=master_private_lb_sg,
            connection=ec2.Port.tcp(6443),
            description="kubectl: MasterPrivateLB - Masters"
        )
        master_securiy_group.add_ingress_rule(
            peer=worker_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Workers - Masters"
        )
        # K8s-Worker
        worker_security_group.add_ingress_rule(
            peer=master_securiy_group,
            connection=ec2.Port.all_traffic(),
            description="ALL: Master - Workers"
        )
        worker_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(22),
            description="SSH: Bastion - Workers"
        )
        worker_security_group.add_ingress_rule(
            peer=bastion_security_group,
            connection=ec2.Port.tcp(6443),
            description="kubectl: Bastion - Workers"
        )

        # Add SecurityGroups to resources
        bastion.add_security_group(bastion_security_group)
        etcd.add_security_group(etcd_security_group)
        master.add_security_group(master_securiy_group)
        worker.add_security_group(worker_security_group)
        cfn_master_public_lb.security_groups = [
            master_public_lb_sg.security_group_id
        ]
        cfn_master_private_lb.security_groups = [
            master_private_lb_sg.security_group_id
        ]

        # Add specific Tags to resources
        core.Tag.add(
            bastion,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-bastion'
        )
        core.Tag.add(
            bastion_lb,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-bastion-lb'
        )
        core.Tag.add(
            master_public_lb,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-master-lb'
        )
        core.Tag.add(
            etcd,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-etcd'
        )
        core.Tag.add(
            master,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-k8s-master'
        )
        core.Tag.add(
            worker,
            apply_to_launched_instances=True,
            key='Name',
            value=tag_project + '-k8s-worker'
        )
        for subnet in vpc.private_subnets:
            core.Tag.add(
                subnet,
                key='Attribute',
                value='private'
            )
        for subnet in vpc.public_subnets:
            core.Tag.add(
                subnet,
                key='Attribute',
                value='public'
            )
Exemplo n.º 17
0
from aws_cdk import core
import aws_cdk.aws_ec2 as ec2

# Deployment configuration: target an existing VPC instead of creating one.
vpc_id = "vpc-111111"  # id of the existing VPC to import
ec2_type = "m5.xlarge"
key_name = "id_rsa"
# Region -> AMI map; only the region the stack deploys to needs a real AMI id.
linux_ami = ec2.GenericLinuxImage({
    "cn-northwest-1": "ami-0f62e91915e16cfc2",
    "eu-west-1": "ami-1111111"
})
# Read the bootstrap script once at import time; pin the encoding so the
# read does not depend on the platform's locale default.
with open("./user_data/user_data.sh", encoding="utf-8") as f:
    user_data = f.read()


class CdkVpcEc2Stack(core.Stack):
    """Stack that places a single EC2 host into an existing (looked-up) VPC."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Import the pre-existing VPC by id (lookup requires env to be set).
        existing_vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

        # Place the host in a public subnet and bootstrap it with the
        # module-level shell script.
        public_subnets = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        bootstrap = ec2.UserData.custom(user_data)

        ec2.Instance(
            self,
            "myEC2",
            instance_name="mySingleHost",
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            vpc=existing_vpc,
            vpc_subnets=public_subnets,
            key_name=key_name,
            user_data=bootstrap,
        )
Exemplo n.º 18
0
# EMQ Full Stack
from aws_cdk import core
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as elb
from aws_cdk import aws_autoscaling as autoscaling
from aws_cdk.core import Duration, CfnParameter
from aws_cdk.aws_autoscaling import HealthCheck
from aws_cdk import aws_rds as rds
# from cdk_stack import AWS_ENV

linux_ami = ec2.GenericLinuxImage({
    #"eu-west-1": "ami-06fd78dc2f0b69910", # ubuntu 18.04 latest
    "eu-west-1": "ami-09c60c18b634a5e00",  # ubuntu 20.04 latest
})

with open("./user_data/user_data.sh") as f:
    user_data = f.read()


class EmqFullStack(core.Stack):
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # The code that defines your stack goes here

        vpc = ec2.Vpc(
            self,
            "VPC_EMQ",
            max_azs=2,
            cidr="10.10.0.0/16",
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Build the 'CovidFree' web tier: a public-subnet VPC, a web
        security group, an autoscaled EC2 fleet, and a classic ELB
        targeting that fleet.

        NOTE(review): `ec2_stacker`, `ats_stacker` and `elb_stacker` are
        presumably module-level import aliases for the CDK ec2 /
        autoscaling / ELB packages — defined outside this view, confirm.
        """
        super().__init__(scope, id, **kwargs)

        # Return the contents of the bootstrap (user data) script.
        def get_userdata():
            with open('userdata.sh') as userdata:
                script = userdata.read()
            return script

        # Return the first public subnet of the VPC defined below.
        def get_public_subnet():
            public_subnet = covid_free_vpc.select_subnets(
                subnet_type=ec2_stacker.SubnetType.PUBLIC)
            return public_subnet.subnets[0]

        # Accessor for the web security group defined below.
        def get_web_sg():
            return covid_free_sg

        # Create `count` standalone EC2 instances (default 4).
        def generate_instances(count=4):
            amazon_linux_2 = ec2_stacker.GenericLinuxImage(
                {"us-east-1": "ami-0fc61db8544a617ed"})

            ec2_objects = []
            for i in range(count):
                ec2_instnace = ec2_stacker.Instance(
                    self,
                    f"Instance number {i}",
                    instance_type=ec2_stacker.InstanceType('t2.micro'),
                    machine_image=amazon_linux_2,
                    vpc=covid_free_vpc,
                    security_group=get_web_sg(),
                    user_data=ec2_stacker.UserData.custom(get_userdata()))
                ec2_objects.append(ec2_instnace)
            return ec2_objects

        # Utility: yields the instance ids/references for CloudFormation.
        # NOTE(review): never called in this scope, so generate_instances()
        # never runs and no standalone instances are actually created.
        def get_instance_reference_ids():
            data = generate_instances()
            for i in data:
                yield (i.instance_id)

        # VPC with public subnets only (private subnet config commented out).
        covid_free_vpc = ec2_stacker.Vpc(
            self,
            'CovidFreeVPC',
            enable_dns_support=True,
            enable_dns_hostnames=True,
            max_azs=3,
            subnet_configuration=[
                ec2_stacker.SubnetConfiguration(
                    subnet_type=ec2_stacker.SubnetType.PUBLIC,
                    name='InternetFacingSubnetGroup',
                    cidr_mask=24)
            ])
        #ec2_stacker.SubnetConfiguration(subnet_type=ec2_stacker.SubnetType.PRIVATE, name='PrivateSubnetGroup',  cidr_mask=24 )
        #])

        # Security group: allow inbound HTTP and SSH from anywhere
        # (creating a security group requires the vpc param).
        covid_free_sg = ec2_stacker.SecurityGroup(self,
                                                  'CovidFreeSG',
                                                  allow_all_outbound=True,
                                                  vpc=covid_free_vpc)
        covid_free_sg.add_ingress_rule(peer=ec2_stacker.Peer.any_ipv4(),
                                       connection=ec2_stacker.Port.tcp(80),
                                       description="allow http traffic")
        covid_free_sg.add_ingress_rule(ec2_stacker.Peer.any_ipv4(),
                                       ec2_stacker.Port.tcp(22),
                                       description="allow ssh traffic")

        # Launch configuration (user data would also be a valid param here).
        # NOTE(review): this launch configuration is never attached to the
        # ASG below, which synthesizes its own from its own parameters —
        # looks like dead infrastructure; confirm before removing.
        covid_free_lc = ats_stacker.CfnLaunchConfiguration(
            self,
            'CovidFreeLC',
            instance_type='t2.micro',
            image_id='ami-0fc61db8544a617ed')

        # AutoScaling group: adding user data through the ASG avoids the
        # base64 error and accepts plain strings.
        instance_type = ec2_stacker.InstanceType.of(
            ec2_stacker.InstanceClass.BURSTABLE2,
            ec2_stacker.InstanceSize.MICRO)
        amazon_linux_2 = ec2_stacker.GenericLinuxImage(
            {"us-east-1": "ami-0fc61db8544a617ed"})
        covid_free_asg = ats_stacker.AutoScalingGroup(
            self,
            'CovidFreeASG',
            vpc=covid_free_vpc,
            associate_public_ip_address=True,
            key_name="CoronaVirusKP",
            instance_type=instance_type,
            machine_image=amazon_linux_2,
            min_capacity=5,
            max_capacity=10)
        covid_free_asg.add_user_data(get_userdata())

        # Classic ELB on port 80; registered targets must be a list of
        # AutoScaling groups.
        elb = elb_stacker.LoadBalancer(self,
                                       'CovidFreeELB',
                                       cross_zone=True,
                                       vpc=covid_free_vpc,
                                       health_check={"port": 80},
                                       internet_facing=True,
                                       subnet_selection=get_public_subnet(),
                                       targets=[covid_free_asg])
        elb.add_listener(external_port=80)
Exemplo n.º 20
0
from aws_cdk import core
from aws_cdk.aws_iam import (Role, ServicePrincipal, ManagedPolicy)
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_elasticloadbalancingv2 as elb
import aws_cdk.aws_autoscaling as autoscaling

# Instance sizing / image configuration for the sample stack below.
ec2_type = "t2.micro"
#key_name = "keypair" #EC2 keyname
# Seoul-region (ap-northeast-2) AMI used by the autoscaling group.
linux_ami = ec2.GenericLinuxImage({"ap-northeast-2": "ami-0bea7fd38fabe821a"})
# Bootstrap script, read once at module import time.
with open("./userdata/userdata.sh") as f:
    userdata = f.read()


class SampleEC2Stack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        alb = elb.ApplicationLoadBalancer(self,
                                          "myALB",
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name="myALB")
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)
        self.asg = autoscaling.AutoScalingGroup(
            self,
            "myASG",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
Exemplo n.º 21
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision the Gen3 admin VM in the default VPC with a root and an
        extended EBS volume, bootstrapped by a substituted user-data script.
        All sizing/AMI choices come from CDK context values.

        NOTE(review): EXT_DEV_NAME is a module-level constant defined outside
        this view — presumably the extended volume's device name; confirm.
        """
        super().__init__(scope, id, **kwargs)

        # Use the account's default VPC; leave subnet selection to CDK defaults
        vpc = ec2.Vpc.from_lookup(self, "VPC", is_default=True)
        vpc_subnets = ec2.SubnetSelection()

        # Set access policies for the instance
        policies = [
            # NOTE(review): this grants full AdministratorAccess, not the
            # S3 read-only access the original comment claimed — confirm
            # such broad permissions are intended for this instance.
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"),
            # Allow login via the SSM session manager
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore")
        ]

        # Instance role assumable by EC2, carrying the policies above
        role = iam.Role(self,
                        "EC2Role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        managed_policies=policies)

        # Root ebs volume with the context-specified size (mount: /dev/sda1)
        ebs_root_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("ROOT_VOLUME_SIZE")))
        ebs_root_block_device = ec2.BlockDevice(device_name="/dev/sda1",
                                                volume=ebs_root_vol)

        # Extended volume - contains a block device volume and a block device
        ebs_extended_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("EXTENDED_VOLUME_SIZE")))
        # Place volume on a block device with a set mount point
        ebs_extended_block_device = ec2.BlockDevice(device_name=EXT_DEV_NAME,
                                                    volume=ebs_extended_vol)

        # Bootstrap (user data): substitute the device name / mount point
        # placeholders into the shell script before attaching it.
        mappings = {
            "__EXT_DEV_NAME__": EXT_DEV_NAME,
            "__EXT_DEV_MOUNT__": '/mnt/gen3'
        }
        with open("user_data/user_data.sh", 'r') as user_data_h:
            user_data_sub = core.Fn.sub(user_data_h.read(), mappings)
        user_data = ec2.UserData.custom(user_data_sub)

        # Set instance type from ec2-type in context
        instance_type = ec2.InstanceType(
            instance_type_identifier=self.node.try_get_context("EC2_TYPE"))

        # AMI id for the current region, taken from context
        machine_image = ec2.GenericLinuxImage({
            self.region:
            self.node.try_get_context(
                "MACHINE_IMAGE"),  # Refer to an existing AMI type
        })

        # Bundle all of the parameters above into the ec2 instance class
        host = ec2.Instance(
            self,
            id="Gen3AdminVm",
            instance_type=instance_type,
            instance_name=self.node.try_get_context("INSTANCE_NAME"),
            machine_image=machine_image,
            vpc=vpc,
            vpc_subnets=vpc_subnets,
            role=role,
            user_data=user_data,
            block_devices=[ebs_root_block_device, ebs_extended_block_device],
        )

        # Output the instance ID
        core.CfnOutput(self, "Output", value=host.instance_id)
Exemplo n.º 22
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision a single (optionally spot-priced) EC2 host in the
        default VPC, with docker bootstrap user data and two extra EBS
        volumes. All sizing/AMI choices come from CDK context values.
        """
        super().__init__(scope, id, **kwargs)

        # Get/set stack name in context so ids below can reference it
        self.node.set_context("STACK_NAME", self.stack_name)

        # Use the account's default VPC; leave subnet selection to CDK defaults
        vpc = ec2.Vpc.from_lookup(self, "VPC", is_default=True)
        vpc_subnets = ec2.SubnetSelection()

        # Set access policies for the instance
        policies = [
            # Read only access for all our s3 buckets
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3ReadOnlyAccess"),
            # Container registry policy so we can pull docker containers from our ECR repo
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            # Allow login via the SSM session manager
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore")
        ]

        # Instance role assumable by EC2, carrying the policies above
        role = iam.Role(self,
                        "EC2Role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        managed_policies=policies)

        # /var volume, sized from context (attached as /dev/sdf)
        ebs_var_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("VAR_VOLUME_SIZE")))
        # Place volume on a block device with the set mount point
        ebs_var_block_device = ec2.BlockDevice(device_name="/dev/sdf",
                                               volume=ebs_var_vol)

        # Extended data volume, sized from context (attached as /dev/sdg)
        ebs_extended_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("EXTENDED_VOLUME_SIZE")))
        # Place volume on a block device with a set mount point
        ebs_extended_block_device = ec2.BlockDevice(device_name="/dev/sdg",
                                                    volume=ebs_extended_vol)

        # Run boot strap -
        """
        The code under userdata.sh completes the following steps
        1. Installs docker into ec2 instance
        2. Mounts our volume to /mnt/
        3. Log into docker
        """

        # Values substituted into the ${...} placeholders of the script
        mappings = {
            "__ACCOUNT_ID__": str(self.account),
            "__REGION__": str(self.region)
        }

        with open("user_data/user_data.sh", 'r') as user_data_h:
            # Use a substitution
            user_data_sub = core.Fn.sub(user_data_h.read(), mappings)

        # Import substitution object into user_data set
        user_data = ec2.UserData.custom(user_data_sub)

        # Set instance type from ec2-type in context
        instance_type = ec2.InstanceType(
            instance_type_identifier=self.node.try_get_context("EC2_TYPE"))

        # AMI id for the current region, taken from context
        machine_image = ec2.GenericLinuxImage({
            self.region:
            self.node.try_get_context(
                "MACHINE_IMAGE"),  # Refer to an existing AMI type
        })

        # Bundle all of the parameters above into the ec2 instance class.
        # LaunchTemplate (spot options) is attached afterwards as a property.
        host = ec2.Instance(
            self,
            id="{}-instance".format(self.node.try_get_context("STACK_NAME")),
            instance_type=instance_type,
            instance_name=self.node.try_get_context("INSTANCE_NAME"),
            machine_image=machine_image,
            vpc=vpc,
            vpc_subnets=vpc_subnets,
            role=role,
            user_data=user_data,
            block_devices=[ebs_var_block_device, ebs_extended_block_device],
        )

        if self.node.try_get_context("USE_SPOT_INSTANCE").lower() == 'true':
            # Spot pricing via a launch template override on the raw instance
            spot_price = self.node.try_get_context("MAX_SPOT_PRICE")
            market_options = {"MarketType": "spot"}
            if spot_price is not None:
                spot_options = {"MaxPrice": spot_price}
                market_options["SpotOptions"] = spot_options
            launch_template_data = {"InstanceMarketOptions": market_options}
            launch_template = ec2.CfnLaunchTemplate(self, "LaunchTemplate")
            launch_template.add_property_override("LaunchTemplateData",
                                                  launch_template_data)

            host.instance.add_property_override(
                "LaunchTemplate", {
                    "LaunchTemplateId": launch_template.ref,
                    "Version": launch_template.attr_latest_version_number
                })

        # Output the instance id so we can ssh into it.
        # Note that the output may appear before the user_data shell script
        # completes, so not all of our goodies may be there yet.
        core.CfnOutput(self, "Output", value=host.instance_id)
Exemplo n.º 23
0
from aws_cdk import core
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as elb
from aws_cdk import aws_autoscaling as autoscaling
from aws_cdk.core import Duration, CfnParameter
from aws_cdk.aws_autoscaling import HealthCheck
# from cdk_stack import AWS_ENV

# ec2_type = "t2.micro"
# key_name = "key_ireland"
# linux_ami = ec2.LookupMachineImage(name="emqx429")

# Single-region machine image (Ubuntu 18.04 in eu-west-1).
linux_ami = ec2.GenericLinuxImage({
    "eu-west-1": "ami-06fd78dc2f0b69910",  # ubuntu 18.04 latest
})

# Bootstrap script, read once at module import time.
with open("./user_data/user_data.sh") as f:
    user_data = f.read()


class CdkEc2Stack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, vpc, env,
                 **kwargs) -> None:
        super().__init__(scope, id, env=env, **kwargs)

        # Define cfn parameters
        ec2_type = CfnParameter(
            self,
            "ec2-instance-type",
            type="String",
            default="t2.micro",
Exemplo n.º 24
0
from aws_cdk import core
import aws_cdk.aws_ec2 as ec2

vpc_id = "MY-VPC-ID"  # id of an existing VPC to import into the stack
ec2_type = "t2.micro"
key_name = "id_rsa"
# Region -> AMI placeholders; replace with real AMI ids per target region.
linux_ami = ec2.GenericLinuxImage({
    "cn-northwest-1": "AMI-ID-IN-cn-northwest-1-REGION",  # refer to an existing AMI
    "eu-west-1": "AMI-ID-IN-eu-west-1-REGION"
})
# Bootstrap script, read once at module import time.
with open("./user_data/user_data.sh") as f:
    user_data = f.read()


class CdkVpcEc2Stack(core.Stack):

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

        host = ec2.Instance(self, "myEC2",
                            instance_type=ec2.InstanceType(
                                instance_type_identifier=ec2_type),
                            instance_name="mySingleHost",
                            machine_image=linux_ami,
                            vpc=vpc,
                            key_name=key_name,
                            vpc_subnets=ec2.SubnetSelection(
                                subnet_type=ec2.SubnetType.PUBLIC),
Exemplo n.º 25
0
    ManagedPolicy,
    ServicePrincipal,
    CfnInstanceProfile,
    Effect,
    PolicyStatement,
)
from constant import Constant
import uuid

# The two China regions use different AMI ids.
ami_map = {
    'cn-northwest-1': Constant.ZHY_EC2_AMI_ID,
    'cn-north-1': Constant.BJ_EC2_AMI_ID,
}

# Region-aware machine image built from the map above.
my_ami = ec2.GenericLinuxImage(ami_map)

# Bootstrap script, read once at module import time.
with open("./user_data/user_data.sh") as f:
    user_data_content = f.read()


class CdkInfraStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # s3
        s3_bucket_name = "{}-s3-{}".format(Constant.PROJECT_NAME,
                                           self._get_UUID(4))
        _s3.Bucket(
            self,
            id=s3_bucket_name,