    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here.
        # Kept as straight-line code for now.
        vpc = ec2.Vpc(self,
                      "Github-Selfhost-vpc",
                      nat_gateways=0,
                      enable_dns_hostnames=True,
                      enable_dns_support=True,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="selfhost_public",
                              subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        # AMI
        # al2 = getLatestAL2Ami()
        # centos = getLatestCentosAmi()
        ubuntu = getLatestUbuntuAmi()

        instances = []

        # Instance creation.
        # Stands up an instance, then installs the GitHub runner on first boot.
        # TODO: this can be made into a loop; see the sketch after this example.
        user_data_focal = ec2.UserData.for_linux()
        user_data_focal.add_commands(
            "apt-get update -y", "apt-get upgrade -y",
            "apt-get install -y curl software-properties-common",
            "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -",
            "add-apt-repository "
            "'deb [arch=arm64] https://download.docker.com/linux/ubuntu focal stable'",
            "apt-get update -y",
            "apt-get install -y docker-ce docker-ce-cli containerd.io",
            "systemctl start docker")
        instance_focal1 = ec2.Instance(
            self,
            "focal1-tester",
            instance_type=ec2.InstanceType("m6g.large"),
            machine_image=ubuntu,
            vpc=vpc,
            key_name=KEY_NAME,
            block_devices=[
                ec2.BlockDevice(device_name='/dev/sda1',
                                volume=ec2.BlockDeviceVolume.ebs(128))
            ],
            user_data=user_data_focal)
        instances.append(instance_focal1)

        # Allow inbound HTTPS connections
        for instance in instances:
            instance.connections.allow_from_any_ipv4(
                ec2.Port.tcp(443), 'Allow inbound HTTPS connections')
            if AWS_PREFIX_LIST:
                instance.connections.allow_from(
                    ec2.Peer.prefix_list(AWS_PREFIX_LIST), ec2.Port.tcp(22),
                    'Allow inbound SSH connections from trusted sources')
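The TODO above can be discharged by driving instance creation from a small table of configurations. A minimal sketch, reusing vpc, ubuntu, KEY_NAME, and user_data_focal from the stack above; the instance_configs list itself is a hypothetical addition:

        # Hypothetical (construct id, instance type, user data) tuples.
        instance_configs = [
            ("focal1-tester", "m6g.large", user_data_focal),
        ]

        instances = []
        for name, instance_type, user_data in instance_configs:
            instances.append(ec2.Instance(
                self,
                name,
                instance_type=ec2.InstanceType(instance_type),
                machine_image=ubuntu,
                vpc=vpc,
                key_name=KEY_NAME,
                block_devices=[
                    ec2.BlockDevice(device_name="/dev/sda1",
                                    volume=ec2.BlockDeviceVolume.ebs(128))
                ],
                user_data=user_data))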
Example 2
    def get_block_device(self, params):
        volume = ec2.BlockDeviceVolume.ebs(
            delete_on_termination=params['delete_on_termination'],
            volume_size=params['volume_size'],
            volume_type=ec2.EbsDeviceVolumeType(params['volume_type']))
        block_device = ec2.BlockDevice(device_name=params['device_name'],
                                       volume=volume)
        return block_device
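A hypothetical call site for get_block_device; the keys mirror the ones the method reads, and the values are illustrative:

        params = {
            "delete_on_termination": True,
            "volume_size": 100,
            "volume_type": "gp2",  # must be a valid ec2.EbsDeviceVolumeType value
            "device_name": "/dev/xvda",
        }
        block_device = self.get_block_device(params)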
Example 3
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 runnerrole: iam.IRole, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # The code that defines your stack goes here
        token = self.node.try_get_context("gitlab-token")
        shellCommands = ec2.UserData.for_linux()
        shellCommands.add_commands("yum update -y")
        shellCommands.add_commands("yum install docker -y")
        shellCommands.add_commands("systemctl start docker")
        shellCommands.add_commands("systemctl enable docker")
        shellCommands.add_commands("usermod -aG docker ec2-user")
        shellCommands.add_commands("usermod -aG docker ssm-user")
        shellCommands.add_commands("chmod +x /var/run/docker.sock")
        shellCommands.add_commands("systemctl restart docker")
        shellCommands.add_commands(
            "docker run -d -v /home/ec2-user/.gitlab-runner:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock --name gitlab-runner-register gitlab/gitlab-runner:alpine register --non-interactive --url https://gitlab.com./ --registration-token "
            + token +
            " --docker-volumes \"/var/run/docker.sock:/var/run/docker.sock\" --executor docker --docker-image \"alpine:latest\" --description \"Docker Runner\" --tag-list \"demo,runner,cdk\" --docker-privileged"
        )
        shellCommands.add_commands(
            "sleep 2 && docker run --restart always -d -v /home/ec2-user/.gitlab-runner:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock --name gitlab-runner gitlab/gitlab-runner:alpine"
        )

        runnerSG = ec2.SecurityGroup(
            self,
            'Gitlab-Runner-SG',
            vpc=vpc,
            security_group_name="Gitlab-Runner-SG",
            description="for aws cdk python lab Gitlab-Runner SG")
        shellCommands.add_commands("yum install mysql -y ")
        runner = ec2.Instance(
            self,
            'Gitlab-Runner',
            instance_type=ec2.InstanceType(
                instance_type_identifier="t3.small"),
            instance_name='Gitlab-Runner',
            role=runnerrole,
            vpc=vpc,
            security_group=runnerSG,
            key_name=my_key_pair,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            machine_image=ec2.LookupMachineImage(
                name="amzn2-ami-hvm-2.0.20200406.0-x86_64-gp2",
                user_data=shellCommands),
            block_devices=[
                ec2.BlockDevice(device_name='/dev/xvda',
                                volume=ec2.BlockDeviceVolume.ebs(60))
            ])

        core.CfnOutput(self, 'instance-id', value=runner.instance_id)
        core.CfnOutput(self, 'runner-role', value=runnerrole.role_arn)
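The registration token is read from CDK context, so it would typically be supplied at deploy time with cdk deploy -c gitlab-token=<token>. Since try_get_context returns None when the key is absent, a guard before the docker command fails fast instead of raising a TypeError from the string concatenation; a sketch:

        token = self.node.try_get_context("gitlab-token")
        if not token:
            raise ValueError(
                "missing runner token: pass -c gitlab-token=<token> to cdk deploy")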
Example 4
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define SSM command document.
        ecr_repo = "{}.dkr.ecr.{}.amazonaws.com/{}".format(AWS_ACCOUNT, AWS_REGION, WINDOWS_X86_ECR_REPO)
        placeholder_map = {"ECR_PLACEHOLDER": ecr_repo, "GITHUB_OWNER_PLACEHOLDER": GITHUB_REPO_OWNER,
                           "REGION_PLACEHOLDER": AWS_REGION, "GITHUB_SOURCE_VERSION_PLACEHOLDER": GITHUB_SOURCE_VERSION}
        content = YmlLoader.load("./cdk/ssm/windows_docker_build_ssm_document.yaml", placeholder_map)
        ssm.CfnDocument(scope=self,
                        id="{}-ssm-document".format(id),
                        name=SSM_DOCUMENT_NAME,
                        content=content,
                        document_type="Command")

        # Define a S3 bucket to store windows docker files and build scripts.
        s3.Bucket(scope=self,
                  id="{}-s3".format(id),
                  bucket_name=S3_BUCKET_NAME,
                  block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # Define a role for EC2.
        ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([WINDOWS_X86_ECR_REPO]))
        s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json(S3_BUCKET_NAME))
        inline_policies = {"ecr_power_user_policy": ecr_power_user_policy, "s3_read_write_policy": s3_read_write_policy}
        role = iam.Role(scope=self, id="{}-role".format(id),
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        inline_policies=inline_policies,
                        managed_policies=[
                            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore")
                        ])

        # Define Windows EC2 instance, where the SSM document will be executed.
        machine_image = ec2.MachineImage.latest_windows(ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_CONTAINERSLATEST)
        vpc = ec2.Vpc(scope=self, id="{}-vpc".format(id))
        block_device_volume = ec2.BlockDeviceVolume.ebs(volume_size=200, delete_on_termination=True)
        block_device = ec2.BlockDevice(device_name="/dev/sda1", volume=block_device_volume)
        instance = ec2.Instance(scope=self,
                                id="{}-instance".format(id),
                                instance_type=ec2.InstanceType(instance_type_identifier="m5d.xlarge"),
                                vpc=vpc,
                                role=role,
                                block_devices=[block_device],
                                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                machine_image=machine_image)

        core.Tags.of(instance).add(WIN_EC2_TAG_KEY, WIN_EC2_TAG_VALUE)
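YmlLoader is an external helper that is not shown here. Judging from the inline variant in a later example (read the file, substitute placeholders, parse the YAML), a minimal sketch might look like the following; the method name and signature follow the call above, everything else is an assumption:

import yaml

class YmlLoader:
    @staticmethod
    def load(file_path, placeholder_map):
        # Read the template, substitute every placeholder, then parse the YAML.
        with open(file_path) as f:
            text = f.read()
        for placeholder, value in placeholder_map.items():
            text = text.replace(placeholder, value)
        return yaml.safe_load(text)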
Example 5
    def provision_managed_nodegroup(self, name: str, ng: Type[EKS.NodegroupBase], max_nodegroup_azs: int) -> None:
        ami_id, user_data = self._get_machine_image(name, ng.machine_image)
        machine_image: Optional[ec2.IMachineImage] = (
            ec2.MachineImage.generic_linux({self.scope.region: ami_id}) if ami_id else None
        )
        mime_user_data: Optional[ec2.UserData] = self._handle_user_data(name, ami_id, ng.ssm_agent, [user_data])

        lt = ec2.LaunchTemplate(
            self.cluster,
            f"LaunchTemplate{name}",
            key_name=ng.key_name,
            launch_template_name=f"{self.name}-{name}",
            block_devices=[
                ec2.BlockDevice(
                    device_name="/dev/xvda",
                    volume=ec2.BlockDeviceVolume.ebs(
                        ng.disk_size,
                        volume_type=ec2.EbsDeviceVolumeType.GP2,
                    ),
                )
            ],
            machine_image=machine_image,
            user_data=mime_user_data,
        )
        lts = eks.LaunchTemplateSpec(id=lt.launch_template_id, version=lt.version_number)

        for i, az in enumerate(self.vpc.availability_zones[:max_nodegroup_azs]):
            self.cluster.add_nodegroup_capacity(
                f"{self.name}-{name}-{i}",
                nodegroup_name=f"{self.name}-{name}-{az}",
                capacity_type=eks.CapacityType.SPOT if ng.spot else eks.CapacityType.ON_DEMAND,
                min_size=ng.min_size,
                max_size=ng.max_size,
                desired_size=ng.desired_size,
                subnets=ec2.SubnetSelection(
                    subnet_group_name=self.private_subnet_name,
                    availability_zones=[az],
                ),
                instance_types=[ec2.InstanceType(it) for it in ng.instance_types],
                launch_template_spec=lts,
                labels=ng.labels,
                tags=ng.tags,
                node_role=self.ng_role,
            )
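A hypothetical call site, assuming a NodegroupBase config object that carries the attributes the method reads (machine_image, ssm_agent, key_name, disk_size, min/max/desired sizes, instance_types, spot, labels, tags):

        self.provision_managed_nodegroup(
            name="workers",
            ng=worker_nodegroup,  # hypothetical NodegroupBase config
            max_nodegroup_azs=2)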
Example 6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
        self.eks_vpc = eks_vpc

        # Create IAM Role For code-server bastion
        bastion_role = iam.Role(
            self,
            "BastionRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.AccountRootPrincipal()),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])
        self.bastion_role = bastion_role
        # Create EC2 Instance Profile for that Role
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[bastion_role.role_name])

        # Create SecurityGroup for the Control Plane ENIs
        eks_security_group = ec2.SecurityGroup(self,
                                               "EKSSecurityGroup",
                                               vpc=eks_vpc,
                                               allow_all_outbound=True)

        eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                            ec2.Port.all_traffic())

        # Create an EKS Cluster
        eks_cluster = eks.Cluster(
            self,
            "cluster",
            cluster_name="cluster",
            vpc=eks_vpc,
            masters_role=bastion_role,
            default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=2,
            security_group=eks_security_group,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            version=eks.KubernetesVersion.V1_17)
        self.cluster_cert = eks_cluster.cluster_certificate_authority_data

        # Deploy ALB Ingress Controller
        # Create the k8s Service account and corresponding IAM Role mapped via IRSA
        alb_service_account = eks_cluster.add_service_account(
            "alb-ingress-controller",
            name="alb-ingress-controller",
            namespace="kube-system")

        # Create the PolicyStatements to attach to the role
        # I couldn't find a way to make this work with a single PolicyDocument,
        # and there are 10 of these (attached in a loop below)
        alb_policy_statement_json_1 = {
            "Effect":
            "Allow",
            "Action": [
                "acm:DescribeCertificate", "acm:ListCertificates",
                "acm:GetCertificate"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_2 = {
            "Effect":
            "Allow",
            "Action": [
                "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup",
                "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup",
                "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses",
                "ec2:DescribeInstances", "ec2:DescribeInstanceStatus",
                "ec2:DescribeInternetGateways",
                "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups",
                "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs",
                "ec2:ModifyInstanceAttribute",
                "ec2:ModifyNetworkInterfaceAttribute",
                "ec2:RevokeSecurityGroupIngress"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_3 = {
            "Effect":
            "Allow",
            "Action": [
                "elasticloadbalancing:AddListenerCertificates",
                "elasticloadbalancing:AddTags",
                "elasticloadbalancing:CreateListener",
                "elasticloadbalancing:CreateLoadBalancer",
                "elasticloadbalancing:CreateRule",
                "elasticloadbalancing:CreateTargetGroup",
                "elasticloadbalancing:DeleteListener",
                "elasticloadbalancing:DeleteLoadBalancer",
                "elasticloadbalancing:DeleteRule",
                "elasticloadbalancing:DeleteTargetGroup",
                "elasticloadbalancing:DeregisterTargets",
                "elasticloadbalancing:DescribeListenerCertificates",
                "elasticloadbalancing:DescribeListeners",
                "elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeRules",
                "elasticloadbalancing:DescribeSSLPolicies",
                "elasticloadbalancing:DescribeTags",
                "elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTargetGroupAttributes",
                "elasticloadbalancing:DescribeTargetHealth",
                "elasticloadbalancing:ModifyListener",
                "elasticloadbalancing:ModifyLoadBalancerAttributes",
                "elasticloadbalancing:ModifyRule",
                "elasticloadbalancing:ModifyTargetGroup",
                "elasticloadbalancing:ModifyTargetGroupAttributes",
                "elasticloadbalancing:RegisterTargets",
                "elasticloadbalancing:RemoveListenerCertificates",
                "elasticloadbalancing:RemoveTags",
                "elasticloadbalancing:SetIpAddressType",
                "elasticloadbalancing:SetSecurityGroups",
                "elasticloadbalancing:SetSubnets",
                "elasticloadbalancing:SetWebAcl"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_4 = {
            "Effect":
            "Allow",
            "Action": [
                "iam:CreateServiceLinkedRole", "iam:GetServerCertificate",
                "iam:ListServerCertificates"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_5 = {
            "Effect": "Allow",
            "Action": ["cognito-idp:DescribeUserPoolClient"],
            "Resource": "*"
        }
        alb_policy_statement_json_6 = {
            "Effect":
            "Allow",
            "Action": [
                "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL",
                "waf-regional:AssociateWebACL",
                "waf-regional:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_7 = {
            "Effect": "Allow",
            "Action": ["tag:GetResources", "tag:TagResources"],
            "Resource": "*"
        }
        alb_policy_statement_json_8 = {
            "Effect": "Allow",
            "Action": ["waf:GetWebACL"],
            "Resource": "*"
        }
        alb_policy_statement_json_9 = {
            "Effect":
            "Allow",
            "Action": [
                "wafv2:GetWebACL", "wafv2:GetWebACLForResource",
                "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_10 = {
            "Effect":
            "Allow",
            "Action": [
                "shield:DescribeProtection", "shield:GetSubscriptionState",
                "shield:DeleteProtection", "shield:CreateProtection",
                "shield:DescribeSubscription", "shield:ListProtections"
            ],
            "Resource":
            "*"
        }

        # Attach the necessary permissions
        for statement_json in [
                alb_policy_statement_json_1, alb_policy_statement_json_2,
                alb_policy_statement_json_3, alb_policy_statement_json_4,
                alb_policy_statement_json_5, alb_policy_statement_json_6,
                alb_policy_statement_json_7, alb_policy_statement_json_8,
                alb_policy_statement_json_9, alb_policy_statement_json_10
        ]:
            alb_service_account.add_to_policy(
                iam.PolicyStatement.from_json(statement_json))

        # Deploy the ALB Ingress Controller from the Helm chart
        eks_cluster.add_helm_chart(
            "aws-alb-ingress-controller",
            chart="aws-alb-ingress-controller",
            repository=
            "http://storage.googleapis.com/kubernetes-charts-incubator",
            namespace="kube-system",
            values={
                "clusterName": "cluster",
                "awsRegion": os.environ["CDK_DEFAULT_REGION"],
                "awsVpcID": eks_vpc.vpc_id,
                "rbac": {
                    "create": True,
                    "serviceAccount": {
                        "create": False,
                        "name": "alb-ingress-controller"
                    }
                }
            })

        # Create code-server bastion
        # Get Latest Amazon Linux AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Create SecurityGroup for code-server
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=eks_vpc,
                                           allow_all_outbound=True)

        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(8080))

        # Create our EC2 instance running CodeServer
        code_server_instance = ec2.Instance(
            self,
            "CodeServerInstance",
            instance_type=ec2.InstanceType("t3.large"),
            machine_image=amzn_linux,
            role=bastion_role,
            vpc=eks_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=security_group,
            block_devices=[
                ec2.BlockDevice(device_name="/dev/xvda",
                                volume=ec2.BlockDeviceVolume.ebs(20))
            ])

        # Add UserData
        code_server_instance.user_data.add_commands(
            "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server")
        code_server_instance.user_data.add_commands(
            "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz"
        )
        code_server_instance.user_data.add_commands(
            "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0"
        )
        code_server_instance.user_data.add_commands(
            "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server"
        )
        code_server_instance.user_data.add_commands(
            "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"auth: password\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"cert: false\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "~/.local/bin/code-server &")
        code_server_instance.user_data.add_commands(
            "yum -y install jq gettext bash-completion moreutils")
        code_server_instance.user_data.add_commands(
            "sudo pip install --upgrade awscli && hash -r")
        code_server_instance.user_data.add_commands(
            "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\""
        )
        code_server_instance.user_data.add_commands(
            "chmod +x /usr/local/bin/kubectl")
        code_server_instance.user_data.add_commands(
            "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash"
        )
        code_server_instance.user_data.add_commands(
            "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)"
        )
        code_server_instance.user_data.add_commands(
            "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "aws configure set default.region ${AWS_REGION}")
        code_server_instance.user_data.add_commands(
            "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -"
        )
        code_server_instance.user_data.add_commands("yum -y install nodejs")
        code_server_instance.user_data.add_commands(
            "amazon-linux-extras enable python3")
        code_server_instance.user_data.add_commands(
            "yum install -y python3 --disablerepo amzn2-core")
        code_server_instance.user_data.add_commands("yum install -y git")
        code_server_instance.user_data.add_commands(
            "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip"
        )
        code_server_instance.user_data.add_commands("npm install -g aws-cdk")
        code_server_instance.user_data.add_commands(
            "echo 'export KUBECONFIG=~/.kube/config' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "git clone https://github.com/jasonumiker/eks-school.git")

        # Add ALB
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=eks_vpc,
                                           internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listener.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listener.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])
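To avoid digging the load balancer's address out of the console after deploy, a CfnOutput could be appended to the stack (a sketch; the output id is illustrative):

        core.CfnOutput(self, "CodeServerURL",
                       value="http://{}".format(lb.load_balancer_dns_name))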
Example 7
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 gitlab: cdk.Stack, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        tags = cdk.Tags.of(self)
        tags.add(key='Stage', value='DevOps')
        tags.add(key='Module', value='Runner')
        tags.add(key='Owner', value='Vunk.Lai')
        tags.add(key='Name', value='GitLab/Runner', apply_to_launched_instances=True)

        subnets = gitlab.vpc.select_subnets(subnet_group_name='Runner').subnets

        security_group = ec2.SecurityGroup(
            self, 'sg',
            vpc=gitlab.vpc,
            security_group_name='GitLab/Runner:SecurityGroup',
            description='Default Runner Security Group',
            allow_all_outbound=True)

        policy = iam.ManagedPolicy(
            self, 'policy',
            # Use alphanumeric and '+=,.@-_' characters
            managed_policy_name='GitLab-Runner_Policy',
            description='SSM Login',
            statements=[
                iam.PolicyStatement(
                    actions=['ssmmessages:*', 'ssm:UpdateInstanceInformation'],
                    resources=['*']),
            ])

        role = iam.Role(
            self, 'role',
            # Use alphanumeric and '+=,.@-_' characters
            role_name='GitLab-Runner_Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[policy])

        folder = Path(__file__).parent.parent / 'user_data'
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            'apt-get update',
            'apt-get install -y unzip',
            'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "aws_cli_v2.zip"',
            'unzip aws_cli_v2.zip',
            'sudo ./aws/install',
            'aws --version')
        asset = Asset(self, 'asset:userdata', path=str(folder / 'runner.sh'))
        asset.grant_read(role)
        path = user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)
        user_data.add_execute_file_command(
            file_path=path, arguments='--verbose -y')

        template = ec2.LaunchTemplate(
            self, 'template',
            launch_template_name='GitLab/Runner_LaunchTemplate',
            cpu_credits=ec2.CpuCredits.STANDARD,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MICRO),
            machine_image=ec2.MachineImage.lookup(
                name='ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*',
                owners=['099720109477']),
            role=role,
            security_group=security_group,
            user_data=user_data,
            block_devices=[
                ec2.BlockDevice(
                    device_name='/dev/sda1',
                    volume=ec2.BlockDeviceVolume.ebs(
                        volume_size=20,
                        volume_type=ec2.EbsDeviceVolumeType.GP3,
                        delete_on_termination=True,
                    )),
            ]
        )

        ec2.CfnInstance(
            self, 'instance',
            launch_template=ec2.CfnInstance.LaunchTemplateSpecificationProperty(
                version=template.latest_version_number,
                launch_template_id=template.launch_template_id,
            ),
            subnet_id=subnets[0].subnet_id
        )
Example 8
    def provision_unmanaged_nodegroup(self, name: str, ng: Type[EKS.NodegroupBase], max_nodegroup_azs: int) -> None:
        ami_id, user_data = self._get_machine_image(name, ng.machine_image)

        machine_image = (
            ec2.MachineImage.generic_linux({self.scope.region: ami_id})
            if ami_id
            else eks.EksOptimizedImage(
                cpu_arch=eks.CpuArch.X86_64,
                kubernetes_version=self.eks_version.version,
                node_type=eks.NodeType.GPU if ng.gpu else eks.NodeType.STANDARD,
            )
        )

        if not hasattr(self, "unmanaged_sg"):
            self.unmanaged_sg = ec2.SecurityGroup(
                self.scope,
                "UnmanagedSG",
                vpc=self.vpc,
                security_group_name=f"{self.name}-sharedNodeSG",
                allow_all_outbound=False,
            )

        if self.bastion_sg:
            self.unmanaged_sg.add_ingress_rule(
                peer=self.bastion_sg,
                connection=ec2.Port(
                    protocol=ec2.Protocol("TCP"),
                    string_representation="ssh",
                    from_port=22,
                    to_port=22,
                ),
            )

        scope = cdk.Construct(self.scope, f"UnmanagedNodeGroup{name}")
        cfn_lt = None
        for i, az in enumerate(self.vpc.availability_zones[:max_nodegroup_azs]):
            indexed_name = f"{self.name}-{name}-{az}"
            asg = aws_autoscaling.AutoScalingGroup(
                scope,
                f"{self.name}-{name}-{i}",
                auto_scaling_group_name=indexed_name,
                instance_type=ec2.InstanceType(ng.instance_types[0]),
                machine_image=machine_image,
                vpc=self.cluster.vpc,
                min_capacity=ng.min_size,
                max_capacity=ng.max_size,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_group_name=self.private_subnet_name,
                    availability_zones=[az],
                ),
                role=self.ng_role,
                security_group=self.unmanaged_sg,
            )
            for k, v in (
                {
                    **ng.tags,
                    **{
                        f"k8s.io/cluster-autoscaler/{self.cluster.cluster_name}": "owned",
                        "k8s.io/cluster-autoscaler/enabled": "true",
                        "eks:cluster-name": self.cluster.cluster_name,
                        "Name": indexed_name,
                    },
                }
            ).items():
                cdk.Tags.of(asg).add(str(k), str(v), apply_to_launched_instances=True)

            mime_user_data = self._handle_user_data(name, ami_id, ng.ssm_agent, [asg.user_data, user_data])

            if not cfn_lt:
                lt = ec2.LaunchTemplate(
                    scope,
                    f"LaunchTemplate{i}",
                    launch_template_name=indexed_name,
                    block_devices=[
                        ec2.BlockDevice(
                            device_name="/dev/xvda",
                            volume=ec2.BlockDeviceVolume.ebs(
                                ng.disk_size,
                                volume_type=ec2.EbsDeviceVolumeType.GP2,
                            ),
                        )
                    ],
                    role=self.ng_role,
                    instance_type=ec2.InstanceType(ng.instance_types[0]),
                    key_name=ng.key_name,
                    machine_image=machine_image,
                    user_data=mime_user_data,
                    security_group=self.unmanaged_sg,
                )
                # mimic adding the security group via the ASG during connect_auto_scaling_group_capacity
                lt.connections.add_security_group(self.cluster.cluster_security_group)
                cfn_lt: ec2.CfnLaunchTemplate = lt.node.default_child
                lt_data = ec2.CfnLaunchTemplate.LaunchTemplateDataProperty(
                    **cfn_lt.launch_template_data._values,
                    metadata_options=ec2.CfnLaunchTemplate.MetadataOptionsProperty(
                        http_endpoint="enabled", http_tokens="required", http_put_response_hop_limit=2
                    ),
                )
                cfn_lt.launch_template_data = lt_data

            # https://github.com/aws/aws-cdk/issues/6734
            cfn_asg: aws_autoscaling.CfnAutoScalingGroup = asg.node.default_child
            # Remove the launch config from our stack
            asg.node.try_remove_child("LaunchConfig")
            cfn_asg.launch_configuration_name = None
            # Attach the launch template to the auto scaling group
            cfn_asg.mixed_instances_policy = cfn_asg.MixedInstancesPolicyProperty(
                launch_template=cfn_asg.LaunchTemplateProperty(
                    launch_template_specification=cfn_asg.LaunchTemplateSpecificationProperty(
                        launch_template_id=cfn_lt.ref,
                        version=lt.version_number,
                    ),
                    overrides=[cfn_asg.LaunchTemplateOverridesProperty(instance_type=it) for it in ng.instance_types],
                ),
            )

            options: dict[str, Any] = {
                "bootstrap_enabled": ami_id is None,
            }
            if not ami_id:
                extra_args: list[str] = []
                if labels := ng.labels:
                    extra_args.append(
                        "--node-labels={}".format(",".join(["{}={}".format(k, v) for k, v in labels.items()]))
                    )

                if taints := ng.taints:
                    extra_args.append(
                        "--register-with-taints={}".format(",".join(["{}={}".format(k, v) for k, v in taints.items()]))
                    )
                options["bootstrap_options"] = eks.BootstrapOptions(kubelet_extra_args=" ".join(extra_args))
Example 9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        """
        Creates a VPC with 2 public and 2 private subnets,
        each subnet with a /18 CIDR range,
        a 2 NAT gateways
        """
        vpc = ec2.Vpc(
            self, "MindLAMPVPC",
            cidr="10.10.0.0/16"
        )

        # Security group for instances
        security_group = ec2.SecurityGroup(
            self, "MindLAMPSecurityGroup",
            vpc=vpc,
            description="Security group for LAMP Platform instances"
        )

        # Allow TCP to port 80 from 0.0.0.0/0
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(80),
            description="Allow HTTP connections to port 80"
        )
        # Allow TCP to port 443 from 0.0.0.0/0
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(443),
            description="Allow HTTPS connections to port 443"
        )

        # Allow TCP to port 443 from ::/0
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv6(),
            connection=ec2.Port.tcp(443),
            description="Allow HTTPS connections to port 443 over IPv6"
        )

        # Install docker on boot
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            "yum -y install docker && usermod -a -G docker ec2-user")

        # The EC2 instance
        instance1 = ec2.Instance(
            self, "MindLAMPInstance1",
            instance_type=ec2.InstanceType("t3a.large"),
            machine_image=ec2.MachineImage.latest_amazon_linux(),
            user_data=user_data,
            instance_name="LAMP platform",
            block_devices=[
                ec2.BlockDevice(
                    device_name="/dev/sdf",
                    volume=ec2.BlockDeviceVolume.ebs(
                        volume_size=30,
                        encrypted=True,
                        delete_on_termination=True
                    )
                ),
                ec2.BlockDevice(
                    device_name="/dev/sdg",
                    volume=ec2.BlockDeviceVolume.ebs(
                        volume_size=100,
                        encrypted=True,
                        delete_on_termination=False
                    )
                )
            ],
            security_group=security_group,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        )

        # Associate the SSM managed policy for SSM control + access
        instance1.role.add_managed_policy(
            policy=iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonSSMManagedInstanceCore")
        )

        # Get an existing Route53 hosted zone
        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self, "MindLAMPHostedZone",
            hosted_zone_id=self.node.try_get_context("hosted_zone_id"),
            zone_name=self.node.try_get_context("zone_name")
        )

        # Create an A record to point to the public IP of instance1
        record_set1 = route53.RecordSet(
            self, "Node1RecordSet",
            record_type=route53.RecordType.A,
            target=route53.RecordTarget(values=[instance1.instance_public_ip]),
            zone=hosted_zone,
            record_name="node1"
        )
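Because instance_public_ip changes whenever the instance is stopped and started, the A record above can go stale. Pinning an Elastic IP to the instance and pointing the record at it would be more robust; a sketch under that assumption:

        eip = ec2.CfnEIP(self, "Node1EIP",
                         domain="vpc",
                         instance_id=instance1.instance_id)
        record_set1 = route53.RecordSet(
            self, "Node1RecordSet",
            record_type=route53.RecordType.A,
            target=route53.RecordTarget(values=[eip.ref]),
            zone=hosted_zone,
            record_name="node1"
        )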
Example 10
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 ecr_repo: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Fetch environment variables.
        s3_bucket_name = EnvUtil.get("S3_FOR_WIN_DOCKER_IMG_BUILD", "windows-docker-images")
        win_ec2_tag_key = EnvUtil.get("WIN_EC2_TAG_KEY", "aws-lc")
        win_ec2_tag_value = EnvUtil.get("WIN_EC2_TAG_VALUE", "aws-lc-windows")
        ssm_document_name = EnvUtil.get("WIN_DOCKER_BUILD_SSM_DOCUMENT", "aws-lc-windows-docker-ssm-doc")

        # Define a S3 bucket to store windows docker files and build scripts.
        s3.Bucket(scope=self,
                  id="{}-s3".format(id),
                  bucket_name=s3_bucket_name,
                  block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # Define SSM command document.
        aws_account_id = kwargs["env"]["account"]
        aws_region = kwargs["env"]["region"]
        ecr_repo = "{}.dkr.ecr.{}.amazonaws.com/{}".format(aws_account_id, aws_region, ecr_repo)
        with open('./cdk/windows_docker_build_ssm_document.yaml') as file:
            file_text = file.read().replace("ECR_PLACEHOLDER", ecr_repo) \
                .replace("S3_BUCKET_PLACEHOLDER", s3_bucket_name) \
                .replace("REGION_PLACEHOLDER", aws_region)
            content = yaml.load(file_text, Loader=yaml.FullLoader)
            ssm.CfnDocument(scope=self,
                            id="{}-ssm-document".format(id),
                            name=ssm_document_name,
                            content=content,
                            document_type="Command")

        # Define a role for EC2.
        s3_read_write_policy = iam.PolicyDocument.from_json(
            {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "s3:Put*",
                            "s3:Get*"
                        ],
                        "Resource": [
                            "arn:aws:s3:::{}/*".format(s3_bucket_name)
                        ]
                    }
                ]
            }
        )
        env = kwargs['env']
        ecr_power_user_policy = iam.PolicyDocument.from_json(
            {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "ecr:GetAuthorizationToken"
                        ],
                        "Resource": "*"
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "ecr:BatchCheckLayerAvailability",
                            "ecr:GetDownloadUrlForLayer",
                            "ecr:GetRepositoryPolicy",
                            "ecr:DescribeRepositories",
                            "ecr:ListImages",
                            "ecr:DescribeImages",
                            "ecr:BatchGetImage",
                            "ecr:GetLifecyclePolicy",
                            "ecr:GetLifecyclePolicyPreview",
                            "ecr:ListTagsForResource",
                            "ecr:DescribeImageScanFindings",
                            "ecr:InitiateLayerUpload",
                            "ecr:UploadLayerPart",
                            "ecr:CompleteLayerUpload",
                            "ecr:PutImage"
                        ],
                        "Resource": "arn:aws:ecr:{}:{}:repository/{}".format(env['region'], env['account'], ecr_repo)
                    }
                ]
            }
        )
        inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy}
        role = iam.Role(scope=self, id="{}-role".format(id),
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        inline_policies=inline_policies,
                        managed_policies=[
                            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore")
                        ])

        # Define Windows EC2 instance, where the SSM document will be executed.
        machine_image = ec2.MachineImage.latest_windows(ec2.WindowsVersion.WINDOWS_SERVER_2016_ENGLISH_FULL_CONTAINERS)
        vpc = ec2.Vpc(scope=self, id="{}-vpc".format(id))
        block_device_volume = ec2.BlockDeviceVolume.ebs(volume_size=200, delete_on_termination=True)
        block_device = ec2.BlockDevice(device_name="/dev/sda1", volume=block_device_volume)
        instance = ec2.Instance(scope=self,
                                id="{}-instance".format(id),
                                instance_type=ec2.InstanceType(instance_type_identifier="m5d.xlarge"),
                                vpc=vpc,
                                role=role,
                                block_devices=[block_device],
                                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                                machine_image=machine_image)

        core.Tag.add(instance, win_ec2_tag_key, win_ec2_tag_value)
Example 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        code_server_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")

        # Create IAM Role For code-build
        code_server_role = iam.Role(
            self,
            "CodeServerRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com")),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])

        # Create EC2 Instance Profile for that Role
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[code_server_role.role_name])

        # Latest Amazon Linux AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Create SecurityGroup
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=code_server_vpc,
                                           allow_all_outbound=True)

        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(8080))

        # Create our EC2 instance running CodeServer
        code_server_instance = ec2.Instance(
            self,
            "CodeServerInstance",
            instance_type=ec2.InstanceType("t3.large"),
            machine_image=amzn_linux,
            role=code_server_role,
            vpc=code_server_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=security_group,
            block_devices=[
                ec2.BlockDevice(device_name="/dev/xvda",
                                volume=ec2.BlockDeviceVolume.ebs(20))
            ])

        # Add UserData
        code_server_instance.user_data.add_commands(
            "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server")
        code_server_instance.user_data.add_commands(
            "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz"
        )
        code_server_instance.user_data.add_commands(
            "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0"
        )
        code_server_instance.user_data.add_commands(
            "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server"
        )
        code_server_instance.user_data.add_commands(
            "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"auth: password\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"cert: false\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "~/.local/bin/code-server &")

        # Add ALB
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=code_server_vpc,
                                           internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listener.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listener.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])
Example 12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get/set stack name for context
        self.node.set_context("STACK_NAME", self.stack_name)

        # The code that defines your stack goes here
        # Set a vpc
        vpc = ec2.Vpc.from_lookup(self, "VPC", is_default=True)
        vpc_subnets = ec2.SubnetSelection()

        # Set access policies for the instance
        policies = [
            # Read only access for all our s3 buckets
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3ReadOnlyAccess"),
            # Set the container registry policy so we can pull docker containers from our ECR repo
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            # Allow us login by the ssm manger
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore")
        ]

        # Get role object with set policies
        role = iam.Role(self,
                        "EC2Role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        managed_policies=policies)

        # Get an EBS volume for /var (attached on /dev/sdf)
        ebs_var_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("VAR_VOLUME_SIZE")))
        # Place volume on a block device with the set mount point
        ebs_var_block_device = ec2.BlockDevice(device_name="/dev/sdf",
                                               volume=ebs_var_vol)

        # Get an extended EBS volume (attached on /dev/sdg)
        ebs_extended_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("EXTENDED_VOLUME_SIZE")))
        # Place volume on a block device with a set mount point
        ebs_extended_block_device = ec2.BlockDevice(device_name="/dev/sdg",
                                                    volume=ebs_extended_vol)

        # Run bootstrap
        """
        The code under userdata.sh completes the following steps
        1. Installs docker into the ec2 instance
        2. Mounts our volume to /mnt/
        3. Logs into docker
        """

        mappings = {
            "__ACCOUNT_ID__": str(self.account),
            "__REGION__": str(self.region)
        }

        with open("user_data/user_data.sh", 'r') as user_data_h:
            # Use a substitution
            user_data_sub = core.Fn.sub(user_data_h.read(), mappings)

        # Import substitution object into user_data set
        user_data = ec2.UserData.custom(user_data_sub)

        # Set instance type from ec2-type in context
        instance_type = ec2.InstanceType(
            instance_type_identifier=self.node.try_get_context("EC2_TYPE"))

        # Get machine type from context
        machine_image = ec2.GenericLinuxImage({
            self.region:
            self.node.try_get_context(
                "MACHINE_IMAGE"),  # Refer to an existing AMI type
        })

        # The code that defines your stack goes here
        # We take all of the parameters we have and place this into the ec2 instance class
        # Except LaunchTemplate which is added as a property to the instance
        host = ec2.Instance(
            self,
            id="{}-instance".format(self.node.try_get_context("STACK_NAME")),
            instance_type=instance_type,
            instance_name=self.node.try_get_context("INSTANCE_NAME"),
            machine_image=machine_image,
            vpc=vpc,
            vpc_subnets=vpc_subnets,
            role=role,
            user_data=user_data,
            block_devices=[ebs_var_block_device, ebs_extended_block_device],
        )

        if self.node.try_get_context("USE_SPOT_INSTANCE").lower() == 'true':
            # Spot pricing via ec2 fleet
            spot_price = self.node.try_get_context("MAX_SPOT_PRICE")
            market_options = {"MarketType": "spot"}
            if spot_price is not None:
                spot_options = {"MaxPrice": spot_price}
                market_options["SpotOptions"] = spot_options
            launch_template_data = {"InstanceMarketOptions": market_options}
            launch_template = ec2.CfnLaunchTemplate(self, "LaunchTemplate")
            launch_template.add_property_override("LaunchTemplateData",
                                                  launch_template_data)

            host.instance.add_property_override(
                "LaunchTemplate", {
                    "LaunchTemplateId": launch_template.ref,
                    "Version": launch_template.attr_latest_version_number
                })

        # Output the instance ID so the host can be looked up (e.g. for SSM login)
        # Note that the stack may complete before the user_data shell script
        # finishes, so not all of our goodies may be there yet
        core.CfnOutput(self, "Output", value=host.instance_id)
Example 13
    def __init__(self, scope: Construct, cid: str,
                 component: str,
                 network: GenericNetwork,
                 security: GenericSecurity,
                 workload_key: _kms.Key,
                 ops_key: KeyPair = None,
                 vpc_subnets: _ec2.SubnetSelection = None,
                 ami_id: str = None,
                 private_ip: str = None,
                 **kwargs):
        super().__init__(scope, cid, **kwargs)

        self.instance_id = ""
        self._workload_key = workload_key
        sec_group = security.get_security_group(component + "SG")
        if not sec_group:
            sec_group = security.create_security_group(component + "SG")
            endpt_sg = security.get_security_group("VPCEndpointSG")
            endpt_sg.connections.allow_from(
                sec_group,
                port_range=_ec2.Port(
                    protocol=_ec2.Protocol.TCP,
                    string_representation=component + " -> Endpoint (443)",
                    from_port=443,
                    to_port=443
                ),
                description="VPC Endpoint Ingress rule from " + component
            )
            sec_group.connections.allow_to(
                endpt_sg,
                port_range=_ec2.Port(
                    protocol=_ec2.Protocol.TCP,
                    string_representation=component + " -> Endpoint (443)",
                    from_port=443,
                    to_port=443
                ),
                description="Egress rule to VPC Endpoint for " + component

            )

        instance_type = _ec2.InstanceType.of(instance_class=_ec2.InstanceClass.STANDARD5,
                                             instance_size=_ec2.InstanceSize.XLARGE)
        key_name = None
        if ops_key is not None:
            key_name = ops_key.key_pair_name

        if vpc_subnets is None:
            vpc_subnets = _ec2.SubnetSelection(subnet_group_name=component)

        user_data = None
        if ami_id is None:
            machine_image = _ec2.MachineImage.lookup(
                name="RHEL-8.3.0_HVM-????????-x86_64-0-Hourly2-GP2", owners=["309956199498"])
            user_data = _ec2.UserData.for_linux()
            for line in get_user_data(self.region, self.node.try_get_context("qs_s3_bucket")):
                user_data.add_commands(line)
        else:
            machine_image = _ec2.MachineImage.lookup(name="*", filters={"image-id": [ami_id]})

        instance_role = security.get_instance_role(component)
        if not instance_role:
            instance_role = security.create_instance_role(component)

        # noinspection PyTypeChecker
        self.instance = _ec2.Instance(self, cid, instance_type=instance_type,
                                      machine_image=machine_image,
                                      block_devices=[_ec2.BlockDevice(
                                          device_name="/dev/sda1",
                                          volume=_ec2.BlockDeviceVolume.ebs(
                                              volume_size=100, encrypted=True))],
                                      vpc=network.get_vpc(),
                                      role=instance_role, security_group=sec_group,
                                      vpc_subnets=vpc_subnets, key_name=key_name,
                                      private_ip_address=private_ip, user_data=user_data)
        self.instance_id = self.instance.instance_id
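get_user_data is imported from elsewhere in this project; a plausible sketch of its shape, assuming it simply yields shell lines with the region and quick-start bucket filled in (the command contents here are hypothetical):

def get_user_data(region: str, qs_s3_bucket: str) -> list:
    # Hypothetical bootstrap: pull and run an install script from the
    # quick-start bucket, one command per line for UserData.add_commands().
    return [
        "yum install -y unzip awscli",
        "aws s3 cp s3://{}/scripts/bootstrap.sh /tmp/bootstrap.sh --region {}".format(
            qs_s3_bucket, region),
        "chmod +x /tmp/bootstrap.sh",
        "/tmp/bootstrap.sh",
    ]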
Example 14
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        tags = cdk.Tags.of(self)
        tags.add(key='Stage', value='DevOps')
        tags.add(key='Module', value='GitLab')
        tags.add(key='Owner', value='Vunk.Lai')
        tags.add(key='Name',
                 value='GitLab/GitLab',
                 apply_to_launched_instances=True)

        vpc = ec2.Vpc(
            self,
            'vpc',
            max_azs=1,
            cidr=ec2.Vpc.DEFAULT_CIDR_RANGE,
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name='Generic',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24,
                                        reserved=True),
                ec2.SubnetConfiguration(name='GitLab',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24),
                ec2.SubnetConfiguration(name='Runner',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24),
            ])
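        # reserved=True sets the 'Generic' block's address range aside without
        # creating a subnet, so the GitLab/Runner CIDRs stay stable if a
        # Generic subnet is added later.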
        cdk.Tags.of(vpc).add(key='Name', value='GitLab/VPC')

        subnets = vpc.select_subnets(subnet_group_name='GitLab').subnets

        security_group = ec2.SecurityGroup(
            self,
            'sg',
            vpc=vpc,
            security_group_name='GitLab/GitLab:SecurityGroup',
            description='Default GitLab Security Group',
            allow_all_outbound=True)
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80),
                                        'LetsEncrypt HTTP-01')
        security_group.add_ingress_rule(ec2.Peer.ipv4(self.home_ip),
                                        ec2.Port.tcp(443), 'Home')
        security_group.add_ingress_rule(
            ec2.Peer.ipv4(ec2.Vpc.DEFAULT_CIDR_RANGE), ec2.Port.tcp(443),
            'LAN')

        policy = iam.ManagedPolicy(
            self,
            'policy',
            # Use alphanumeric and '+=,.@-_' characters
            managed_policy_name='GitLab-GitLab_Policy',
            description='SSM Login',
            statements=[
                iam.PolicyStatement(
                    actions=['ssmmessages:*', 'ssm:UpdateInstanceInformation'],
                    resources=['*']),
            ])

        role = iam.Role(
            self,
            'role',
            # Use alphanumeric and '+=,.@-_' characters
            role_name='GitLab-GitLab_Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[policy])

        folder = Path(__file__).parent.parent / 'user_data'
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            'apt-get update -y', 'apt-get install -y unzip',
            'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "aws_cli_v2.zip"',
            'unzip aws_cli_v2.zip', 'sudo ./aws/install', 'aws --version')
        asset = Asset(self, 'asset:gitlab.rb', path=str(folder / 'gitlab.rb'))
        asset.grant_read(role)
        user_data.add_s3_download_command(bucket=asset.bucket,
                                          bucket_key=asset.s3_object_key,
                                          local_file='/etc/gitlab/gitlab.rb')
        asset = Asset(self, 'asset:userdata', path=str(folder / 'gitlab.sh'))
        asset.grant_read(role)
        path = user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)
        user_data.add_execute_file_command(file_path=path,
                                           arguments='--verbose -y')
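        # add_s3_download_command returns the local path the asset lands at
        # (an auto-generated /tmp path when local_file is omitted), which is
        # what add_execute_file_command runs above.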
        # asset = Asset(
        #     self, 'asset:prometheus:rules', path=str(folder / 'gitlab.rules.json'))

        template = ec2.LaunchTemplate(
            self,
            'template',
            # Use alphanumeric and '-()./_' characters
            launch_template_name='GitLab/GitLab_LaunchTemplate',
            cpu_credits=ec2.CpuCredits.STANDARD,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.MEDIUM),
            machine_image=ec2.MachineImage.lookup(
                name='ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*',
                owners=['099720109477']),
            role=role,
            security_group=security_group,
            user_data=user_data,
            block_devices=[
                ec2.BlockDevice(device_name='/dev/sda1',
                                volume=ec2.BlockDeviceVolume.ebs(
                                    volume_size=8,
                                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                                    delete_on_termination=True,
                                )),
                ec2.BlockDevice(device_name='/dev/sdf',
                                volume=ec2.BlockDeviceVolume.ebs(
                                    volume_size=20,
                                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                                    delete_on_termination=False,
                                ))
            ])

        instance = ec2.CfnInstance(
            self,
            'instance',
            launch_template=ec2.CfnInstance.LaunchTemplateSpecificationProperty(
                version=template.latest_version_number,
                launch_template_id=template.launch_template_id,
            ),
            subnet_id=subnets[0].subnet_id)
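        # ec2.CfnInstance is used above because CDK v1's high-level
        # ec2.Instance cannot reference a launch template; the L1 resource is
        # pointed at the template by id and latest version number instead.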

        zone = route53.HostedZone.from_lookup(self, 'zone', domain_name=DOMAIN)
        route53.CnameRecord(self,
                            'cname',
                            record_name='gitlab',
                            domain_name=instance.attr_public_dns_name,
                            zone=zone,
                            ttl=cdk.Duration.minutes(5))

        self.vpc = vpc
        self.security_group = security_group
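self.home_ip and DOMAIN come from elsewhere in this stack; a minimal sketch of what they might look like (both values are hypothetical, home_ip presumably being an attribute or property on the stack):

DOMAIN = 'example.com'        # must match the Route53 hosted zone looked up above
home_ip = '203.0.113.7/32'    # operator's home CIDR (RFC 5737 documentation address)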
Example 15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Set a vpc
        vpc = ec2.Vpc.from_lookup(self, "VPC", is_default=True)
        vpc_subnets = ec2.SubnetSelection()

        # Set access policies for the instance
        policies = [
            # Full administrator access (note: far broader than S3 read-only)
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"),
            # Allow login via SSM Session Manager
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore")
        ]

        # Get role object with set policies
        role = iam.Role(self,
                        "EC2Role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
                        managed_policies=policies)

        # Get a root ebs volume with the specified size (device: /dev/sda1)
        ebs_root_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("ROOT_VOLUME_SIZE")))
        ebs_root_block_device = ec2.BlockDevice(device_name="/dev/sda1",
                                                volume=ebs_root_vol)

        # Get volume - contains a block device volume and a block device
        ebs_extended_vol = ec2.BlockDeviceVolume.ebs(
            volume_size=int(self.node.try_get_context("EXTENDED_VOLUME_SIZE")))
        # Place volume on a block device with a set mount point
        ebs_extended_block_device = ec2.BlockDevice(device_name=EXT_DEV_NAME,
                                                    volume=ebs_extended_vol)

        # Run bootstrap - User Data
        mappings = {
            "__EXT_DEV_NAME__": EXT_DEV_NAME,
            "__EXT_DEV_MOUNT__": '/mnt/gen3'
        }
        with open("user_data/user_data.sh", 'r') as user_data_h:
            user_data_sub = core.Fn.sub(user_data_h.read(), mappings)
        user_data = ec2.UserData.custom(user_data_sub)
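        # core.Fn.sub replaces ${__EXT_DEV_NAME__} and ${__EXT_DEV_MOUNT__}
        # placeholders inside user_data.sh with the mapping values at deploy time.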

        # Set instance type from ec2-type in context
        instance_type = ec2.InstanceType(
            instance_type_identifier=self.node.try_get_context("EC2_TYPE"))

        machine_image = ec2.GenericLinuxImage({
            self.region:
            self.node.try_get_context(
                "MACHINE_IMAGE"),  # An existing AMI ID for this region
        })

        # The code that defines your stack goes here
        # Pass all of the parameters gathered above into the ec2.Instance construct
        host = ec2.Instance(
            self,
            id="Gen3AdminVm",
            instance_type=instance_type,
            instance_name=self.node.try_get_context("INSTANCE_NAME"),
            machine_image=machine_image,
            vpc=vpc,
            vpc_subnets=vpc_subnets,
            role=role,
            user_data=user_data,
            block_devices=[ebs_root_block_device, ebs_extended_block_device],
        )

        # Return instance ID
        core.CfnOutput(self, "Output", value=host.instance_id)
Example 16
    def __init__(self, scope: core.Construct, id: str, jenkins_ami: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        key_name = props['key_name']
        self.vpc = props['vpc']
        # sg-jenkins
        sgjenkins = ec2.SecurityGroup(self, "sg_jenkins", vpc=self.vpc, allow_all_outbound=True,
                                      security_group_name="sg_jenkins", description="sg_jenkins")
        #   sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(22), description='for port 22 testing')
        sgjenkins.add_ingress_rule(peer=ec2.Peer.ipv4(
            '0.0.0.0/0'), connection=ec2.Port.tcp(22), description='sg_jenkins')
        sgjenkins.add_ingress_rule(peer=ec2.Peer.ipv4(
            '119.123.34.135/32'), connection=ec2.Port.tcp(80), description='sg_jenkins')

        role_ssm = iam.Role(
            self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role_ssm.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AmazonEC2RoleforSSM"))

        self.jenkins_vm = ec2.Instance(self, 'jenkins',
                                       vpc=self.vpc,
                                       instance_type=ec2.InstanceType(
                                           instance_type_identifier="m5.large"),
                                       # jenkins_ami is an AMI ID string, so
                                       # wrap it as an image for this region
                                       machine_image=ec2.MachineImage.generic_linux(
                                           {self.region: jenkins_ami}),
                                       block_devices=[ec2.BlockDevice(
                                           device_name="/dev/xvda",
                                           volume=ec2.BlockDeviceVolume.ebs(10,
                                                                            encrypted=True
                                                                            )
                                       ),
                                           ec2.BlockDevice(
                                           device_name="/dev/sdb",
                                           volume=ec2.BlockDeviceVolume.ebs(100,
                                                                            encrypted=True
                                                                            )
                                       )
                                       ],
                                       instance_name="jenkins",
                                       key_name=key_name,
                                       role=role_ssm,
                                       user_data=ec2.UserData.custom(
                                           user_data),
                                       security_group=sgjenkins,
                                       vpc_subnets=ec2.SubnetSelection(
                                           subnet_type=ec2.SubnetType.PUBLIC)
                                       )

        self.bastion_vm = ec2.Instance(self, 'eks-Bastion',
                                       vpc=self.vpc,
                                       instance_type=ec2.InstanceType(
                                           instance_type_identifier="m5.large"),
                                       machine_image=ec2.MachineImage.generic_linux(
                                           {self.region: jenkins_ami}),
                                       block_devices=[ec2.BlockDevice(
                                           device_name="/dev/xvda",
                                           volume=ec2.BlockDeviceVolume.ebs(10,
                                                                            encrypted=True
                                                                            )
                                       ), ec2.BlockDevice(
                                           device_name="/dev/sdb",
                                           volume=ec2.BlockDeviceVolume.ebs(100,
                                                                            encrypted=True
                                                                            )
                                       )],
                                       instance_name="eks-Bastion",
                                       key_name=key_name,
                                       role=role_ssm,
                                       user_data=ec2.UserData.custom(
                                           user_data),
                                       security_group=sgjenkins,
                                       vpc_subnets=ec2.SubnetSelection(
                                           subnet_type=ec2.SubnetType.PUBLIC)
                                       )
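The user_data string handed to ec2.UserData.custom(...) in both instances above is defined elsewhere in the original module; a plausible sketch (the file name is hypothetical):

# Hypothetical: read a cloud-init script shipped alongside the stack code.
with open('user_data/jenkins_bootstrap.sh') as f:
    user_data = f.read()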
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        # Just do it ugly straight line for now
        vpc = ec2.Vpc(self, "Github-Selfhost-vpc",
                      nat_gateways = 0,
                      enable_dns_hostnames = True,
                      enable_dns_support = True,
                      subnet_configuration=[ec2.SubnetConfiguration(name="selfhost_public", subnet_type=ec2.SubnetType.PUBLIC)]
                      )

        # AMI
        al2 = getLatestAL2Ami()
        centos = getLatestCentosAmi()
        ubuntu = getLatestUbuntuAmi()

        instances = []

        # Instance creation.
        # Stands up an instance, then installs the github runner on the first boot.
        user_data_al2 = ec2.UserData.for_linux()
        user_data_al2.add_commands("yum update -y",
                                   "amazon-linux-extras enable docker",
                                   "yum install -y curl docker",
                                   "systemctl start docker",
                                   "(su ec2-user && cd ~ && mkdir actions-runner && cd actions-runner && "
                                   "curl -O -L https://github.com/actions/runner/releases/download/v2.273.0/actions-runner-linux-arm64-2.273.0.tar.gz "
                                   "&& tar xzf ./actions-runner-linux-arm64-2.273.0.tar.gz && ./config.sh --unattended --url {} --token {} --labels al2 "
                                   "&& sudo ./svc.sh install && sudo ./svc.sh start)".format(GITHUB_REPO, GITHUB_TOKEN))
        instance_al2 = ec2.Instance(self, "al2-tester",
            instance_type=ec2.InstanceType("c6g.medium"),
            machine_image=al2,
            vpc=vpc,
            key_name=KEY_NAME,
            block_devices=[ec2.BlockDevice(device_name='/dev/xvda', volume=ec2.BlockDeviceVolume(ec2.EbsDeviceProps(volume_size=128)))],
            user_data=user_data_al2)
        instances.append(instance_al2)
        
        user_data_centos = ec2.UserData.for_linux()
        user_data_centos.add_commands("yum update -y",
                                      "yum install -y curl",
                                      "dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo",
                                      "dnf -y install docker-ce --nobest",
                                      "systemctl start docker",
                                      "(su centos && cd ~ && mkdir actions-runner && cd actions-runner && "
                                      "curl -O -L https://github.com/actions/runner/releases/download/v2.273.0/actions-runner-linux-arm64-2.273.0.tar.gz "
                                      "&& tar xzf ./actions-runner-linux-arm64-2.273.0.tar.gz && ./config.sh --unattended --url {} --token {} --labels centos8 "
                                      "&& sudo ./svc.sh install && sudo ./svc.sh start)".format(GITHUB_REPO, GITHUB_TOKEN))
        instance_centos = ec2.Instance(self, "centos8-tester",
            instance_type=ec2.InstanceType("c6g.medium"),
            machine_image=centos,
            vpc=vpc,
            key_name=KEY_NAME,
            block_devices=[ec2.BlockDevice(device_name='/dev/sda1', volume=ec2.BlockDeviceVolume(ec2.EbsDeviceProps(volume_size=128)))],
            user_data=user_data_centos)
        instances.append(instance_centos)

        user_data_focal = ec2.UserData.for_linux()
        user_data_focal.add_commands("apt-get update -y",
                                     "apt-get upgrade -y",
                                     "apt-get install -y curl software-properties-common",
                                     "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -",
                                     "add-apt-repository "
                                     "'deb [arch=arm64] https://download.docker.com/linux/ubuntu focal stable'",
                                     "apt-get update -y",
                                     "apt-get install -y docker-ce docker-ce-cli containerd.io",
                                     "systemctl start docker",
                                     "(su ubuntu && cd ~ && mkdir actions-runner && cd actions-runner && "
                                     "curl -O -L https://github.com/actions/runner/releases/download/v2.273.0/actions-runner-linux-arm64-2.273.0.tar.gz "
                                     "&& tar xzf ./actions-runner-linux-arm64-2.273.0.tar.gz && ./config.sh --url {} --token {} --unattended --labels focal "
                                     "&& sudo ./svc.sh install && sudo ./svc.sh start)".format(GITHUB_REPO, GITHUB_TOKEN))
        instance_focal = ec2.Instance(self, "focal-tester",
            instance_type=ec2.InstanceType("c6g.medium"),
            machine_image=ubuntu,
            vpc=vpc,
            key_name=KEY_NAME,
            block_devices=[ec2.BlockDevice(device_name='/dev/sda1', volume=ec2.BlockDeviceVolume(ec2.EbsDeviceProps(volume_size=128)))],
            user_data=user_data_focal)
        instances.append(instance_focal)

        # Allow inbound HTTPS connections
        for instance in instances:
            instance.connections.allow_from_any_ipv4(ec2.Port.tcp(443), 'Allow inbound HTTPS connections')
            if AWS_PREFIX_LIST:
                instance.connections.allow_from(ec2.Peer.prefix_list(AWS_PREFIX_LIST), ec2.Port.tcp(22), 'Allow inbound SSH connections from trusted sources')
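The getLatestAL2Ami/getLatestCentosAmi/getLatestUbuntuAmi helpers are defined elsewhere in this project. A plausible sketch of the Ubuntu one, assuming the aws_cdk.aws_ec2 module is imported as ec2 (as in the example) and that the helper resolves Canonical's public SSM parameter for the latest arm64 Focal image; the original helper's lookup may differ:

def getLatestUbuntuAmi():
    # Canonical publishes its latest AMI IDs under this public SSM path;
    # arm64 matches the Graviton (c6g/m6g) instance types used above.
    return ec2.MachineImage.from_ssm_parameter(
        "/aws/service/canonical/ubuntu/server/20.04/stable/current/arm64/"
        "hvm/ebs-gp2/ami-id")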