コード例 #1
0
    def execute(self, target_asset: Asset):
        """Stage *target_asset* for download onto the instance.

        Appends an S3 download command for the asset to the stored
        user data, grants the instance role read access to the asset,
        and returns the local path the asset will be downloaded to.
        """
        # Queue the download in the instance's user-data script.
        downloaded_path = self.__user_data.add_s3_download_command(
            bucket=target_asset.bucket,
            bucket_key=target_asset.s3_object_key,
        )
        # The instance role must be able to read the asset bucket,
        # otherwise the download fails at boot with AccessDenied.
        target_asset.grant_read(self.__instance.role)
        return downloaded_path
コード例 #2
0
ファイル: compute_tier.py プロジェクト: yashda/aws-rfdk
class UserDataProvider(InstanceUserDataProvider):
    """User-data lifecycle hooks that echo a marker at each stage and
    run a staged sample configuration script after worker launch."""

    def __init__(self, scope: Construct, stack_id: str):
        super().__init__(scope, stack_id)
        # Stage the sample worker-configuration script as an S3 asset.
        script_file = os.path.join(os.getcwd(), "..", "scripts",
                                   "configure_worker.sh")
        self.test_script = Asset(scope, "SampleAsset", path=script_file)

    def pre_cloud_watch_agent(self, host) -> None:
        # Marker emitted before the CloudWatch agent step.
        host.user_data.add_commands("echo preCloudWatchAgent")

    def pre_render_queue_configuration(self, host) -> None:
        # Marker emitted before the render-queue configuration step.
        host.user_data.add_commands("echo preRenderQueueConfiguration")

    def pre_worker_configuration(self, host) -> None:
        # Marker emitted before the worker configuration step.
        host.user_data.add_commands("echo preWorkerConfiguration")

    def post_worker_launch(self, host) -> None:
        # After launch: download the staged script from S3 and run it.
        host.user_data.add_commands("echo postWorkerLaunch")
        self.test_script.grant_read(host)
        script_local_path = host.user_data.add_s3_download_command(
            bucket=self.test_script.bucket,
            bucket_key=self.test_script.s3_object_key,
        )
        host.user_data.add_execute_file_command(file_path=script_local_path)
コード例 #3
0
ファイル: compute_tier.py プロジェクト: yashda/aws-rfdk
 def __init__(self, scope: Construct, stack_id: str):
     """Initialize the provider and stage ``configure_worker.sh`` as an asset."""
     super().__init__(scope, stack_id)
     # Resolve the sample script relative to the working directory.
     sample_script = os.path.join(os.getcwd(), "..", "scripts",
                                  "configure_worker.sh")
     self.test_script = Asset(scope, "SampleAsset", path=sample_script)
コード例 #4
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision a public-only VPC and one SSM-managed EC2 instance
        that downloads and executes ``configure.sh`` from an S3 asset
        at first boot.
        """
        super().__init__(scope, id, **kwargs)

        # VPC: public subnets only; no NAT gateways keeps the stack cheap.
        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        # AMI: latest Amazon Linux 2 (HVM, general-purpose EBS storage).
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance role with the SSM managed policy so the instance can
        # be administered through Systems Manager.
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Security group: allow inbound HTTP from anywhere.
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=vpc,
                                           allow_all_outbound=True)

        # FIX: Peer.ipv4 is a static factory method; the original called
        # ec2.Peer().ipv4(...), instantiating the abstract Peer class,
        # which fails at synthesis time.
        security_group.add_ingress_rule(peer=ec2.Peer.ipv4("0.0.0.0/0"),
                                        connection=ec2.Port.tcp(80))

        # Instance
        instance = ec2.Instance(self,
                                "Instance",
                                instance_type=ec2.InstanceType("t3a.micro"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=role,
                                security_group=security_group)

        # Stage configure.sh in S3 as a CDK asset and queue its download
        # in the instance's user data.
        asset = Asset(self,
                      "Asset",
                      path=os.path.join(dirname, "configure.sh"))
        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        # User data executes the downloaded script; the role needs read
        # access to the asset bucket for the download to succeed.
        instance.user_data.add_execute_file_command(file_path=local_path)
        asset.grant_read(instance.role)
コード例 #5
0
 async def create_site_function(self, id: str, domain: str, cdn_name: str) -> Function:
     """Create the site-serving Lambda function for *domain*.

     Packages the ``site_function`` directory as an S3 asset and wires
     it up as a Python 3.7 Lambda with X-Ray tracing enabled.
     """
     # FIX: the original dict literal listed 'PROD' twice; the duplicate
     # key has been removed (it silently overwrote the first entry).
     env = {
         'PROD': 'True',
         'SITE_DOMAIN': domain,
         'APP_VERSION': '0.02',
         'STATIC_DOMAIN': cdn_name,
     }
     # Lambda code is delivered as an S3 asset rather than inline.
     site_code_asset = Asset(
         self,
         '{}FunctionAsset'.format(id),
         path='site_function')
     site_code = S3Code(
         bucket=site_code_asset.bucket,
         key=site_code_asset.s3_object_key)
     return Function(
         self,
         '{}Function'.format(id),
         timeout=Duration.seconds(3),
         code=site_code,
         handler='site_function.handler',
         environment=env,
         tracing=Tracing.ACTIVE,
         initial_policy=[DDB_FUNCTION_POLICY_STATEMENT],
         runtime=Runtime(
             name='python3.7',
             supports_inline_code=True,
         )
     )
コード例 #6
0
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        """Stand up a minimal SSM-managed EC2 instance in a public-only
        VPC, bootstrapped by ``configure.sh`` delivered as an S3 asset.
        """
        super().__init__(scope, id, **kwargs)

        # Public-only VPC; nat_gateways=0 avoids NAT costs.
        network = ec2.Vpc(
            self,
            "VPC",
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC),
            ],
        )

        # Latest Amazon Linux 2 image (HVM, general-purpose storage).
        machine_image = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE,
        )

        # Role assumed by EC2, with core SSM permissions for remote
        # management (no SSH key required).
        instance_role = iam.Role(
            self,
            "InstanceSSM",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
        )
        instance_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))

        # The instance itself.
        server = ec2.Instance(
            self,
            "Instance",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=machine_image,
            vpc=network,
            role=instance_role,
        )

        # Ship configure.sh to S3 as an asset and have the user data
        # download it at boot.
        bootstrap_asset = Asset(
            self,
            "Asset",
            path=os.path.join(dirname, "configure.sh"),
        )
        downloaded_script = server.user_data.add_s3_download_command(
            bucket=bootstrap_asset.bucket,
            bucket_key=bootstrap_asset.s3_object_key,
        )

        # Execute the downloaded script; grant read so the download
        # itself is authorized.
        server.user_data.add_execute_file_command(file_path=downloaded_script)
        bootstrap_asset.grant_read(server.role)
コード例 #7
0
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        """Upload ``Titanic.csv`` as a CDK asset and export its S3
        coordinates (bucket, key, URLs) as stack outputs.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Single-file asset uploaded to the CDK staging bucket.
        asset = Asset(self, 'SampleSingleFileAsset',
                      path=os.path.join("Project_Data", 'Titanic.csv'))

        # Export the asset's location so other stacks/tools can find it.
        # (The original bound the first output to an unused local
        # `bucketname`; the dead binding and commented-out bucket code
        # have been removed.)
        core.CfnOutput(self, "S3Bucket", value=asset.s3_bucket_name,
                       description="mynameisnikhil420",
                       export_name="nikhil420")
        core.CfnOutput(self, "S3ObjectKey", value=asset.s3_object_key,
                       description="Titanic")
        core.CfnOutput(self, "S3HttpURL", value=asset.http_url)
        core.CfnOutput(self, "S3ObjectURL", value=asset.s3_object_url)
コード例 #8
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Create a public-only VPC and a t3.nano instance bootstrapped
        by ``configure.sh`` downloaded from an S3 asset.
        """
        super().__init__(scope, id, **kwargs)

        # Public-only VPC with no NAT gateways.
        vpc = ec2.Vpc(self,
                      "NewInstanceVPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        # Latest Amazon Linux 2 AMI.
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance role with SSM access for remote administration.
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        instance = ec2.Instance(
            self,
            "CDKNewInstance",
            instance_type=ec2.InstanceType("t3.nano"),
            key_name="arronmoore_com_v2",
            machine_image=amzn_linux,
            vpc=vpc,
            security_group=self.configure_security_group(vpc),
            role=role)

        asset = Asset(self,
                      "NewInstanceConfigureScript",
                      path="./new_instance/configure.sh")

        # FIX: grant the instance role read access to the asset; without
        # this the user-data S3 download fails with AccessDenied.
        asset.grant_read(instance.role)

        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        instance.user_data.add_execute_file_command(file_path=local_path)
コード例 #9
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision an MSK (Kafka) cluster across the private subnets
        plus a public EC2 client instance bootstrapped from S3 assets.
        """
        super().__init__(scope, id, **kwargs)

        # VPC: 2 AZs, one public and one private subnet group, one NAT.
        vpc = ec2.Vpc(
            self,
            f"{prefix}_vpc",
            nat_gateways=1,
            enable_dns_hostnames=True,
            enable_dns_support=True,
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="privat",
                                        subnet_type=ec2.SubnetType.PRIVATE)
            ])

        # MSK Cluster Security Group: open the Kafka/Zookeeper ports to
        # the public subnets (where the client instance lives).
        sg_msk = ec2.SecurityGroup(self,
                                   f"{prefix}_sg",
                                   vpc=vpc,
                                   allow_all_outbound=True,
                                   security_group_name=f"{prefix}_sg_msk")
        for subnet in vpc.public_subnets:
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(2181), "Zookeeper Plaintext")
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(2182), "Zookeeper TLS")
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(9092), "Broker Plaintext")
            # FIX: 9094 is the MSK TLS broker port, not Zookeeper; the
            # rule description was copied from the 2181 rule by mistake.
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(9094), "Broker TLS")
        for subnet in vpc.private_subnets:
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.all_traffic(),
                                    "All private traffic")

        # MSK Cluster: one broker per private subnet.
        msk.CfnCluster(
            self,
            f"{prefix}_kafka_cluster",
            cluster_name="msk-quickstart",
            number_of_broker_nodes=len(vpc.private_subnets),
            kafka_version="2.6.0",
            encryption_info=msk.CfnCluster.EncryptionInfoProperty(
                encryption_in_transit=msk.CfnCluster.
                EncryptionInTransitProperty(client_broker="TLS_PLAINTEXT")),
            broker_node_group_info=msk.CfnCluster.BrokerNodeGroupInfoProperty(
                instance_type="kafka.m5.large",
                client_subnets=[
                    subnet.subnet_id for subnet in vpc.private_subnets
                ],
                security_groups=[sg_msk.security_group_id],
                storage_info=msk.CfnCluster.StorageInfoProperty(
                    ebs_storage_info=msk.CfnCluster.EBSStorageInfoProperty(
                        volume_size=200))))

        # EC2 Client AMI: latest Amazon Linux 2.
        amazon_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role: SSM management plus read-only MSK access.
        role = iam.Role(self,
                        f"{prefix}_ssm_role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonMSKReadOnlyAccess"))

        # EC2 Client Instance in a public subnet.
        instance = ec2.Instance(
            self,
            f"{prefix}_instance",
            instance_type=ec2.InstanceType("m5.large"),
            machine_image=amazon_linux,
            vpc=vpc,
            role=role,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

        # Bootstrap script in S3 as Asset; queued for download at boot.
        asset_bootstrap = Asset(self,
                                f"{prefix}_bootstrap",
                                path=os.path.join(dirname, "configure.sh"))
        local_bootstrap_path = instance.user_data.add_s3_download_command(
            bucket=asset_bootstrap.bucket,
            bucket_key=asset_bootstrap.s3_object_key)

        # Loader project zipped into S3 as an asset; downloaded to a
        # fixed local file name so the bootstrap script can find it.
        asset_loader = Asset(self,
                             f"{prefix}_loader",
                             path=os.path.join(dirname, "earthquake_loader"))
        instance.user_data.add_s3_download_command(
            bucket=asset_loader.bucket,
            bucket_key=asset_loader.s3_object_key,
            local_file="earthquake_loader.zip")

        # User data executes the bootstrap script from S3.
        instance.user_data.add_execute_file_command(
            file_path=local_bootstrap_path)

        # Grant read permissions on both assets to the instance role.
        asset_bootstrap.grant_read(instance.role)
        asset_loader.grant_read(instance.role)
コード例 #10
0
ファイル: runner_stack.py プロジェクト: VunkLai/gitlab
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 gitlab: cdk.Stack, **kwargs) -> None:
        """Provision a GitLab Runner EC2 instance.

        Builds a launch template (Ubuntu 20.04, t3.micro, gp3 root disk)
        whose user data installs the AWS CLI v2, downloads ``runner.sh``
        from an S3 asset, and executes it; then launches one instance
        from that template into the ``Runner`` subnet group of the
        *gitlab* stack's VPC.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Tag everything in this stack; 'Name' also propagates to
        # instances launched from the template.
        tags = cdk.Tags.of(self)
        tags.add(key='Stage', value='DevOps')
        tags.add(key='Module', value='Runner')
        tags.add(key='Owner', value='Vunk.Lai')
        tags.add(key='Name', value='GitLab/Runner', apply_to_launched_instances=True)

        # Reuse the subnets reserved for runners in the GitLab stack's VPC.
        subnets = gitlab.vpc.select_subnets(subnet_group_name='Runner').subnets

        security_group = ec2.SecurityGroup(
            self, 'sg',
            vpc=gitlab.vpc,
            security_group_name='GitLab/Runner:SecurityGroup',
            description='Default Runner Security Group',
            allow_all_outbound=True)

        # Minimal policy for Session Manager access (no SSH needed).
        policy = iam.ManagedPolicy(
            self, 'policy',
            # Use alphanumeric and '+=,.@-_' characters
            managed_policy_name='GitLab-Runner_Policy',
            description='SSM Login',
            statements=[
                iam.PolicyStatement(
                    actions=['ssmmessages:*', 'ssm:UpdateInstanceInformation'],
                    resources=['*']),
            ])

        role = iam.Role(
            self, 'role',
            # Use alphanumeric and '+=,.@-_' characters
            role_name='GitLab-Runner_Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[policy])

        # User data: install AWS CLI v2, then fetch and run runner.sh
        # from the S3 asset (the CLI is required for the S3 download).
        folder = Path(__file__).parent.parent / 'user_data'
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            'apt install unzip',
            'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "aws_cli_v2.zip"',
            'unzip aws_cli_v2.zip',
            'sudo ./aws/install',
            'aws --version')
        asset = Asset(self, 'asset:userdata', path=str(folder / 'runner.sh'))
        asset.grant_read(role)
        path = user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)
        user_data.add_execute_file_command(
            file_path=path, arguments='--verbose -y')

        # Launch template: Canonical-owned (099720109477) Ubuntu 20.04
        # AMI, burstable t3.micro with standard CPU credits, 20 GiB gp3
        # root volume removed on termination.
        template = ec2.LaunchTemplate(
            self, 'template',
            launch_template_name='GitLab/Runner_LaunchTemplate',
            cpu_credits=ec2.CpuCredits.STANDARD,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MICRO),
            machine_image=ec2.MachineImage.lookup(
                name='ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*',
                owners=['099720109477']),
            role=role,
            security_group=security_group,
            user_data=user_data,
            block_devices=[
                ec2.BlockDevice(
                    device_name='/dev/sda1',
                    volume=ec2.BlockDeviceVolume.ebs(
                        volume_size=20,
                        volume_type=ec2.EbsDeviceVolumeType.GP3,
                        delete_on_termination=True,
                    )),
            ]
        )

        # Launch a single instance from the latest template version into
        # the first Runner subnet.
        ec2.CfnInstance(
            self, 'instance',
            launch_template=ec2.CfnInstance.LaunchTemplateSpecificationProperty(
                version=template.latest_version_number,
                launch_template_id=template.launch_template_id,
            ),
            subnet_id=subnets[0].subnet_id
        )
コード例 #11
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision a bastion host (t3a.large, Amazon Linux 2) in a
        public-only VPC, bootstrapped by ``configure.sh`` from an S3
        asset, and output its public IP.
        """
        super().__init__(scope, id, **kwargs)

        # NOTE(review): this nested App/Stack is never synthesized from
        # here and exists only as the target of the tag below; it looks
        # like leftover scaffolding and is a candidate for removal.
        app = App()
        cdk_stack = Stack(app, 'network')

        # Add a tag to all constructs in the stack
        Tag.add(cdk_stack, "Created By", "umut.yalcinkaya")

        # Public-only VPC without NAT gateways.
        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        # Latest Amazon Linux 2 AMI.
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role and SSM Managed Policy
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Bastion instance with SSH key access.
        instance = ec2.Instance(self,
                                "Instance",
                                instance_type=ec2.InstanceType("t3a.large"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=role,
                                key_name="umut-poc")

        # Attach a 30 GiB gp2 data volume via a raw CloudFormation
        # property override (no L2 construct support used here).
        instance.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])

        # Script in S3 as Asset; downloaded by user data at boot.
        asset = Asset(self,
                      "Asset",
                      path=os.path.join(dirname, "configure.sh"))
        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        # User data executes the script; the role needs asset read access.
        instance.user_data.add_execute_file_command(file_path=local_path)
        asset.grant_read(instance.role)

        # Output the bastion's public IP after deploy (the original's
        # unused local binding `output` was dropped).
        core.CfnOutput(self,
                       "BastionHost_information",
                       value=instance.instance_public_ip,
                       description="BastionHost's Public IP")
コード例 #12
0
ファイル: nlb_self.py プロジェクト: frosch123/aws-infra
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: ICluster,
        ecs_security_group: SecurityGroup,
        ecs_source_security_group: SecurityGroup,
        vpc: IVpc,
        **kwargs,
    ) -> None:
        """Build the NLB proxy stack.

        Creates an auto-scaling group of small instances whose user data
        installs nginx and a SOCKS proxy (pproxy) from files delivered
        as an S3 asset, wires it into an HTTPS listener, opens security
        group paths for the lambdas and ECS, and registers loopback DNS
        records. Only one instance of this stack may exist (tracked in
        the module-level ``g_nlb``).
        """
        super().__init__(scope, id, **kwargs)

        global g_nlb

        Tags.of(self).add("Stack", "Common-Nlb")

        # TODO -- You need to do some manual actions:
        # TODO --  1) enable auto-assign IPv6 address on public subnets
        # TODO --  2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

        self.private_zone = HostedZone.from_lookup(
            self,
            "PrivateZone",
            domain_name="openttd.internal",
            private_zone=True,
        )

        # User data: unpack the asset into /nlb, then configure and
        # start nginx and the pproxy SOCKS service.
        user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

        asset = Asset(self, "NLB", path="user_data/nlb/")
        user_data.add_commands(
            "echo 'Extracting user-data files'",
            "mkdir /nlb",
            "cd /nlb",
        )
        user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key,
            local_file="/nlb/files.zip",
        )
        user_data.add_commands("unzip files.zip", )

        user_data.add_commands(
            "echo 'Setting up configuration'",
            f"echo '{self.region}' > /etc/.region",
            f"echo '{cluster.cluster_name}' > /etc/.cluster",
        )

        user_data.add_commands(
            "echo 'Installing nginx'",
            "amazon-linux-extras install epel",
            "yum install nginx -y",
            "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
            "mkdir /etc/nginx/nlb.d",
        )

        user_data.add_commands(
            "echo 'Installing Python3'",
            "yum install python3 -y",
            "python3 -m venv /venv",
            "/venv/bin/pip install -r /nlb/requirements.txt",
        )

        user_data.add_commands(
            "echo 'Generating nginx configuration'",
            "cd /etc/nginx/nlb.d",
            "/venv/bin/python /nlb/nginx.py",
            "systemctl start nginx",
        )

        user_data.add_commands(
            "echo 'Setting up SOCKS proxy'",
            "useradd pproxy",
            "cp /nlb/pproxy.service /etc/systemd/system/",
            "systemctl daemon-reload",
            "systemctl enable pproxy.service",
            "systemctl start pproxy.service",
        )

        # ASG of t3a.nano instances, one per AZ in the public subnets.
        asg = AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.nano"),
            machine_image=MachineImage.latest_amazon_linux(
                generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
            vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                        one_per_az=True),
            user_data=user_data,
            health_check=HealthCheck.elb(grace=Duration.seconds(0)),
        )
        asg.add_security_group(ecs_security_group)

        # SSM access, asset read access, and read-only ECS/EC2 describe
        # permissions needed by the nginx-config generator script.
        asg.role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        asset.grant_read(asg.role)
        policy = ManagedPolicy(self, "Policy")
        policy_statement = PolicyStatement(
            actions=[
                "ec2:DescribeInstances",
                "ecs:DescribeContainerInstances",
                "ecs:DescribeTasks",
                "ecs:ListContainerInstances",
                "ecs:ListServices",
                "ecs:ListTagsForResource",
                "ecs:ListTasks",
            ],
            resources=["*"],
        )
        policy.add_statements(policy_statement)
        asg.role.add_managed_policy(policy)

        # We could also make an additional security-group and add that to
        # the ASG, but it keeps adding up. This makes it a tiny bit
        # easier to get an overview what traffic is allowed from the
        # console on AWS.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.security_group = asg.node.children[0]

        listener_https.add_targets(
            subdomain_name=self.admin_subdomain_name,
            port=80,
            target=asg,
            priority=2,
        )

        # Create a Security Group so the lambdas can access the EC2.
        # This is needed to check if the EC2 instance is fully booted.
        lambda_security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )
        self.security_group.add_ingress_rule(
            peer=lambda_security_group,
            connection=Port.tcp(80),
            description="Lambda to target",
        )

        self.security_group.add_ingress_rule(
            peer=ecs_source_security_group,
            connection=Port.udp(8080),
            description="ECS to target",
        )

        self.create_ecs_lambda(
            cluster=cluster,
            auto_scaling_group=asg,
        )

        # Lifecycle-hook lambdas for instances joining/leaving the ASG.
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
            timeout=Duration.seconds(180),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
            timeout=Duration.seconds(30),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )

        # Initialize the NLB record on localhost, as we need to be able to
        # reference it for other entries to work correctly.
        ARecord(
            self,
            "ARecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        AaaaRecord(
            self,
            "AAAARecord",
            target=RecordTarget.from_ip_addresses("::1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        # To make things a bit easier, also alias to staging.
        self.create_alias(self, "nlb.staging")

        # Create a record for the internal DNS
        ARecord(
            self,
            "APrivateRecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=self.private_zone,
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )

        # Enforce the singleton: at most one NlbStack per process.
        if g_nlb is not None:
            raise Exception("Only a single NlbStack instance can exist")
        g_nlb = self
0
 def __create_asset(self, id: str, path: str):
     """Create an S3 asset scoped to this construct."""
     return Asset(path=path, id=id, scope=self)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ################################################################################
        # VPC
        vpc = ec2.Vpc(self, "Monitoring VPC", max_azs=3)

        ################################################################################
        # Amazon OpenSearch Service domain
        es_sec_grp = ec2.SecurityGroup(
            self,
            'OpenSearchSecGrpMonitoring',
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name='OpenSearchSecGrpMonitoring')
        es_sec_grp.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))
        es_sec_grp.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443))

        domain = opensearch.Domain(
            self,
            'opensearch-service-monitor',
            version=opensearch.EngineVersion.
            OPENSEARCH_1_0,  # Upgrade when CDK upgrades
            domain_name=DOMAIN_NAME,
            removal_policy=core.RemovalPolicy.DESTROY,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type=DOMAIN_DATA_NODE_INSTANCE_TYPE,
                data_nodes=DOMAIN_DATA_NODE_INSTANCE_COUNT,
                master_node_instance_type=DOMAIN_MASTER_NODE_INSTANCE_TYPE,
                master_nodes=DOMAIN_MASTER_NODE_INSTANCE_COUNT,
                warm_instance_type=DOMAIN_UW_NODE_INSTANCE_TYPE,
                warm_nodes=DOMAIN_UW_NODE_INSTANCE_COUNT),
            ebs=opensearch.EbsOptions(enabled=True,
                                      volume_size=DOMAIN_INSTANCE_VOLUME_SIZE,
                                      volume_type=ec2.EbsDeviceVolumeType.GP2),
            vpc=vpc,
            vpc_subnets=[ec2.SubnetType.PUBLIC],
            security_groups=[es_sec_grp],
            zone_awareness=opensearch.ZoneAwarenessConfig(
                enabled=True, availability_zone_count=DOMAIN_AZ_COUNT),
            enforce_https=True,
            node_to_node_encryption=True,
            encryption_at_rest={"enabled": True},
            use_unsigned_basic_auth=True,
            fine_grained_access_control={
                "master_user_name":
                DOMAIN_ADMIN_UNAME,
                "master_user_password":
                core.SecretValue.plain_text(DOMAIN_ADMIN_PW)
            })

        core.CfnOutput(
            self,
            "MasterUser",
            value=DOMAIN_ADMIN_UNAME,
            description="Master User Name for Amazon OpenSearch Service")

        core.CfnOutput(
            self,
            "MasterPW",
            value=DOMAIN_ADMIN_PW,
            description="Master User Password for Amazon OpenSearch Service")

        ################################################################################
        # Dynamo DB table for time stamp tracking
        table = ddb.Table(
            self,
            'opensearch-monitor-lambda-timestamp',
            table_name=TABLE_NAME,
            partition_key=ddb.Attribute(name="domain",
                                        type=ddb.AttributeType.STRING),
            sort_key=ddb.Attribute(name='region',
                                   type=ddb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        ################################################################################
        # Lambda monitoring function
        lambda_func = lambda_.Function(
            self,
            'CWMetricsToOpenSearch',
            function_name="CWMetricsToOpenSearch_monitoring",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('CWMetricsToOpenSearch'),
            handler='handler.handler',
            memory_size=1024,
            timeout=core.Duration.minutes(10),
            vpc=vpc)

        # The collector reads and updates the (domain, region) timestamp rows.
        table.grant_read_data(lambda_func)
        table.grant_write_data(lambda_func)
        lambda_func.add_environment('TABLE', table.table_name)
        lambda_func.add_environment('DOMAIN_ENDPOINT',
                                    'https://' + domain.domain_endpoint)
        lambda_func.add_environment('DOMAIN_ADMIN_UNAME', DOMAIN_ADMIN_UNAME)
        # NOTE(review): the admin password is injected as a plaintext Lambda
        # environment variable; prefer Secrets Manager + a grant_read().
        lambda_func.add_environment('DOMAIN_ADMIN_PW', DOMAIN_ADMIN_PW)
        lambda_func.add_environment('REGIONS', REGIONS_TO_MONITOR)

        # When the domain is created here, restrict access
        # NOTE(review): despite the comment above, es:* on resource '*' is not
        # restricted; scope it to the ARNs of the monitored domains.
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(actions=['es:*'], resources=['*']))

        # The function needs to read CW events. Restrict
        # NOTE(review): cloudwatch:* on '*' is wider than read-only; the
        # Get*/List*/Describe* actions would suffice for metric reads.
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(actions=['cloudwatch:*'], resources=['*']))

        # Fire the collector every LAMBDA_INTERVAL seconds via an EventBridge
        # rate rule.
        lambda_schedule = events.Schedule.rate(
            core.Duration.seconds(LAMBDA_INTERVAL))
        event_lambda_target = targets.LambdaFunction(handler=lambda_func)
        events.Rule(self,
                    "Monitoring",
                    enabled=True,
                    schedule=lambda_schedule,
                    targets=[event_lambda_target])

        ################################################################################
        # Lambda for CW Logs
        # Receives CloudWatch Logs subscription-filter events and forwards
        # them to the OpenSearch domain.
        lambda_func_cw_logs = lambda_.Function(
            self,
            'CWLogsToOpenSearch',
            function_name="CWLogsToOpenSearch_monitoring",
            # NOTE(review): the Node.js 12 runtime is end-of-life; plan an
            # upgrade of the handler to a supported runtime.
            runtime=lambda_.Runtime.NODEJS_12_X,
            # Code.asset() is deprecated in CDK v1; Code.from_asset() is the
            # supported equivalent and packages the same local directory.
            code=lambda_.Code.from_asset('CWLogsToOpenSearch'),
            handler='index.handler',
            vpc=vpc)

        # # Load Amazon OpenSearch Service Domain to env variable
        lambda_func_cw_logs.add_environment('DOMAIN_ENDPOINT',
                                            domain.domain_endpoint)

        # # When the domain is created here, restrict access
        # NOTE(review): es:* on '*' is not restricted; scope to the domain ARN.
        lambda_func_cw_logs.add_to_role_policy(
            iam.PolicyStatement(actions=['es:*'], resources=['*']))

        # # The function needs to read CW Logs. Restrict
        lambda_func_cw_logs.add_to_role_policy(
            iam.PolicyStatement(actions=['logs:*'], resources=['*']))

        # Add permission to create CW logs trigger for all specified region and current account, as region does not have an option to be wildcard
        # One resource-based permission per monitored region lets CloudWatch
        # Logs in that region invoke this function via subscription filters.
        account_id = boto3.client("sts").get_caller_identity()["Account"]
        for region in json.loads(REGIONS_TO_MONITOR):
            lambda_func_cw_logs.add_permission(
                id="lambda-cw-logs-permission-" + region,
                principal=iam.ServicePrincipal("logs.amazonaws.com"),
                action="lambda:InvokeFunction",
                source_arn="arn:aws:logs:" + region + ":" + account_id +
                ":*:*:*")

        ################################################################################
        # Jump host for SSH tunneling and direct access
        sn_public = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)

        # Latest Amazon Linux 2 AMI (standard edition, HVM, GP2 storage).
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role and SSM Managed Policy
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))

        # Public jump host; also serves Dashboards over HTTPS via nginx (see
        # the user-data bootstrap below).
        instance = ec2.Instance(
            self,
            'instance',
            instance_type=ec2.InstanceType(EC2_INSTANCE_TYPE),
            vpc=vpc,
            machine_image=amzn_linux,
            vpc_subnets=sn_public,
            key_name=EC2_KEY_NAME,
            role=role,
        )
        # NOTE(review): SSH and HTTPS are open to 0.0.0.0/0; consider
        # restricting SSH to a known CIDR or using SSM sessions only.
        instance.connections.allow_from_any_ipv4(ec2.Port.tcp(22), 'SSH')
        instance.connections.allow_from_any_ipv4(ec2.Port.tcp(443), 'HTTPS')

        # The jump host may call any ES API, but only against this domain.
        stmt = iam.PolicyStatement(actions=['es:*'],
                                   resources=[domain.domain_arn])
        instance.add_to_role_policy(stmt)

        # Create SNS topic, subscription, IAM roles, Policies
        # OpenSearch alerting publishes notifications to this topic; the
        # e-mail subscription must be confirmed by the recipient.
        sns_topic = sns.Topic(self, "cdk_monitoring_topic")

        sns_topic.add_subscription(
            subscriptions.EmailSubscription(SNS_NOTIFICATION_EMAIL))

        # Publish-only policy scoped to the topic.
        sns_policy_statement = iam.PolicyStatement(
            actions=["sns:publish"],
            resources=[sns_topic.topic_arn],
            effect=iam.Effect.ALLOW)
        sns_policy = iam.ManagedPolicy(self, "cdk_monitoring_policy")
        sns_policy.add_statements(sns_policy_statement)

        # Role the OpenSearch service assumes so its alerting destination can
        # publish to the topic.
        sns_role = iam.Role(
            self,
            "cdk_monitoring_sns_role",
            assumed_by=iam.ServicePrincipal("es.amazonaws.com"))
        sns_role.add_managed_policy(sns_policy)

        # Stage the dashboards export, nginx config and alert-creation script
        # in the CDK asset bucket and queue their download in the instance's
        # user data. Each *_path is the local path the file lands at.
        dirname = os.path.dirname(__file__)
        dashboards_asset = Asset(
            self,
            "DashboardsAsset",
            path=os.path.join(dirname,
                              'export_opensearch_dashboards_V1_0.ndjson'))
        dashboards_asset.grant_read(instance.role)
        dashboards_asset_path = instance.user_data.add_s3_download_command(
            bucket=dashboards_asset.bucket,
            bucket_key=dashboards_asset.s3_object_key,
        )

        nginx_asset = Asset(self,
                            "NginxAsset",
                            path=os.path.join(dirname,
                                              'nginx_opensearch.conf'))
        nginx_asset.grant_read(instance.role)
        nginx_asset_path = instance.user_data.add_s3_download_command(
            bucket=nginx_asset.bucket,
            bucket_key=nginx_asset.s3_object_key,
        )

        alerting_asset = Asset(self,
                               "AlertingAsset",
                               path=os.path.join(dirname, 'create_alerts.sh'))
        alerting_asset.grant_read(instance.role)
        alerting_asset_path = instance.user_data.add_s3_download_command(
            bucket=alerting_asset.bucket,
            bucket_key=alerting_asset.s3_object_key,
        )

        # Bootstrap the jump host: install nginx as a TLS reverse proxy in
        # front of the domain, template the downloaded assets with the real
        # endpoint/credentials, then create the OpenSearch alerts.
        instance.user_data.add_commands(
            "yum update -y",
            "yum install jq -y",
            "amazon-linux-extras install nginx1.12",
            # The assets are expected under /tmp/assets -- presumably where
            # add_s3_download_command placed them; TODO confirm the prefix.
            "cd /tmp/assets",
            "mv {} export_opensearch_dashboards_V1_0.ndjson".format(
                dashboards_asset_path),
            "mv {} nginx_opensearch.conf".format(nginx_asset_path),
            "mv {} create_alerts.sh".format(alerting_asset_path),
            # Self-signed certificate for the nginx HTTPS listener.
            # FIX: these two commands were one implicitly-concatenated string
            # literal (missing comma) held together by an embedded "\n".
            # Passing them as separate arguments renders the exact same user
            # data, because add_commands joins commands with newlines.
            "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/nginx/cert.key -out /etc/nginx/cert.crt -subj /C=US/ST=./L=./O=./CN=.",
            "cp nginx_opensearch.conf /etc/nginx/conf.d/",
            # Substitute deploy-time values into the templated assets.
            "sed -i 's/DEFAULT_DOMAIN_NAME/" + DOMAIN_NAME +
            "/g' /tmp/assets/export_opensearch_dashboards_V1_0.ndjson",
            "sed -i 's/DOMAIN_ENDPOINT/" + domain.domain_endpoint +
            "/g' /etc/nginx/conf.d/nginx_opensearch.conf",
            "sed -i 's/DOMAIN_ENDPOINT/" + domain.domain_endpoint +
            "/g' /tmp/assets/create_alerts.sh",
            # '=' is used as the sed delimiter where the value contains '/'
            # (ARNs); passwords/usernames get it too in case of '/' chars.
            "sed -i 's=LAMBDA_CW_LOGS_ROLE_ARN=" +
            lambda_func_cw_logs.role.role_arn +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's=SNS_ROLE_ARN=" + sns_role.role_arn +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's/SNS_TOPIC_ARN/" + sns_topic.topic_arn +
            "/g' /tmp/assets/create_alerts.sh",
            "sed -i 's=DOMAIN_ADMIN_UNAME=" + DOMAIN_ADMIN_UNAME +
            "=g' /tmp/assets/create_alerts.sh",
            "sed -i 's=DOMAIN_ADMIN_PW=" + DOMAIN_ADMIN_PW +
            "=g' /tmp/assets/create_alerts.sh",
            "systemctl restart nginx.service",
            "chmod 500 create_alerts.sh",
            # Give nginx/the domain a moment before registering alerts.
            "sleep 5",
            "bash --verbose create_alerts.sh",
        )

        # Where to reach Dashboards (nginx on the jump host proxies to the
        # domain endpoint).
        core.CfnOutput(self,
                       "Dashboards URL (via Jump host)",
                       value="https://" + instance.instance_public_ip,
                       description="Dashboards URL via Jump host")

        # FIX: corrected typo in the description ("receievedt at").
        core.CfnOutput(
            self,
            "SNS Subscription Alert Message",
            value=SNS_NOTIFICATION_EMAIL,
            description="Please confirm your SNS subscription received at")
コード例 #15 (Code example #15)
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        """Provision a single-instance GitLab server.

        Creates a dedicated single-AZ VPC, a security group, an IAM role
        limited to SSM Session Manager, a launch template whose user data
        installs the AWS CLI and bootstraps GitLab from S3 assets, the EC2
        instance itself, and a Route53 CNAME pointing at it. Exposes
        ``self.vpc`` and ``self.security_group`` for sibling stacks.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Stack-wide tags; the Name tag also propagates to launched instances.
        tags = cdk.Tags.of(self)
        tags.add(key='Stage', value='DevOps')
        tags.add(key='Module', value='GitLab')
        tags.add(key='Owner', value='Vunk.Lai')
        tags.add(key='Name',
                 value='GitLab/GitLab',
                 apply_to_launched_instances=True)

        # Single-AZ VPC with three public /24 subnets and no NAT gateways
        # (cost). The 'Generic' subnet is reserved -- address space is set
        # aside but the subnet is not created yet.
        vpc = ec2.Vpc(
            self,
            'vpc',
            max_azs=1,
            cidr=ec2.Vpc.DEFAULT_CIDR_RANGE,
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name='Generic',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24,
                                        reserved=True),
                ec2.SubnetConfiguration(name='GitLab',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24),
                ec2.SubnetConfiguration(name='Runner',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=24),
            ])
        cdk.Tags.of(vpc).add(key='Name', value='GitLab/VPC')

        subnets = vpc.select_subnets(subnet_group_name='GitLab').subnets

        # Port 80 stays world-open for the Let's Encrypt HTTP-01 challenge;
        # 443 is limited to the operator's home IP and the VPC CIDR.
        security_group = ec2.SecurityGroup(
            self,
            'sg',
            vpc=vpc,
            security_group_name='GitLab/GitLab:SecurityGroup',
            description='Default GitLab Security Group',
            allow_all_outbound=True)
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80),
                                        'LetsEncrypt HTTP-01')
        # NOTE(review): self.home_ip is not assigned in this constructor --
        # presumably defined elsewhere on the class; confirm before deploy.
        security_group.add_ingress_rule(ec2.Peer.ipv4(self.home_ip),
                                        ec2.Port.tcp(443), 'Home')
        security_group.add_ingress_rule(
            ec2.Peer.ipv4(ec2.Vpc.DEFAULT_CIDR_RANGE), ec2.Port.tcp(443),
            'LAN')

        # Minimal policy for SSM Session Manager logins (no SSH ingress).
        policy = iam.ManagedPolicy(
            self,
            'policy',
            # Use alphanumeric and '+=,.@-_' characters
            managed_policy_name='GitLab-GitLab_Policy',
            description='SSM Login',
            statements=[
                iam.PolicyStatement(
                    actions=['ssmmessages:*', 'ssm:UpdateInstanceInformation'],
                    resources=['*']),
            ])

        role = iam.Role(
            self,
            'role',
            # Use alphanumeric and '+=,.@-_' characters
            role_name='GitLab-GitLab_Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[policy])

        # User data: install AWS CLI v2, drop the GitLab config from the CDK
        # asset bucket into place, then run the bootstrap script.
        folder = Path(__file__).parent.parent / 'user_data'
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            # FIX: was 'apt install unzip' -- without -y, apt aborts at its
            # confirmation prompt when run unattended by cloud-init (no tty);
            # apt-get is also the stable CLI for scripts.
            'apt-get install -y unzip',
            'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "aws_cli_v2.zip"',
            'unzip aws_cli_v2.zip', 'sudo ./aws/install', 'aws --version')
        asset = Asset(self, 'asset:gitlab.rb', path=str(folder / 'gitlab.rb'))
        asset.grant_read(role)
        user_data.add_s3_download_command(bucket=asset.bucket,
                                          bucket_key=asset.s3_object_key,
                                          local_file='/etc/gitlab/gitlab.rb')
        asset = Asset(self, 'asset:userdata', path=str(folder / 'gitlab.sh'))
        asset.grant_read(role)
        path = user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)
        user_data.add_execute_file_command(file_path=path,
                                           arguments='--verbose -y')
        # asset = Asset(
        #     self, 'asset:prometheus:rules', path=str(folder / 'gitlab.rules.json'))

        # Burstable t3.medium; the 8 GiB root volume is disposable while the
        # 20 GiB /dev/sdf data volume survives instance termination.
        template = ec2.LaunchTemplate(
            self,
            'template',
            # Use alphanumeric and '-()./_' characters
            launch_template_name='GitLab/GitLab_LaunchTemplate',
            cpu_credits=ec2.CpuCredits.STANDARD,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.MEDIUM),
            machine_image=ec2.MachineImage.lookup(
                name='ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*',
                owners=['099720109477']),
            role=role,
            security_group=security_group,
            user_data=user_data,
            block_devices=[
                ec2.BlockDevice(device_name='/dev/sda1',
                                volume=ec2.BlockDeviceVolume.ebs(
                                    volume_size=8,
                                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                                    delete_on_termination=True,
                                )),
                ec2.BlockDevice(device_name='/dev/sdf',
                                volume=ec2.BlockDeviceVolume.ebs(
                                    volume_size=20,
                                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                                    delete_on_termination=False,
                                ))
            ])

        # L1 CfnInstance so every setting comes from the launch template;
        # pinned to the first (only AZ's) GitLab subnet.
        instance = ec2.CfnInstance(
            self,
            'instance',
            launch_template=ec2.CfnInstance.
            LaunchTemplateSpecificationProperty(
                version=template.latest_version_number,
                launch_template_id=template.launch_template_id,
            ),
            subnet_id=subnets[0].subnet_id)

        # gitlab.<DOMAIN> -> the instance's public DNS name.
        zone = route53.HostedZone.from_lookup(self, 'zone', domain_name=DOMAIN)
        route53.CnameRecord(self,
                            'cname',
                            record_name='gitlab',
                            domain_name=instance.attr_public_dns_name,
                            zone=zone,
                            ttl=cdk.Duration.minutes(5))

        # Shared with sibling stacks (e.g. the Runner stack).
        self.vpc = vpc
        self.security_group = security_group