Code example #1
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the VPC resource.
        self._vpc = Vpc(self, "MyVPC", cidr="10.10.0.0/16")
        # Create a Security Group within the VPC that is used to allow
        # management traffic from designated jump hosts.
        self._sg = SecurityGroup(
            self,
            "MySG",
            vpc=self._vpc,
            allow_all_outbound=False,
            description="Management traffic from jump boxes",
            security_group_name="jumpbox-mgmt-traffic")

        # Add ingress rules to the Security Group for the jump host
        # 10.255.0.10 to TCP/22 and TCP/3389.
        self._sg.add_ingress_rule(peer=Peer.ipv4("10.255.0.10/32"),
                                  connection=Port(
                                      protocol=Protocol.TCP,
                                      string_representation="host1",
                                      from_port=22,
                                      to_port=22))
        self._sg.add_ingress_rule(peer=Peer.ipv4("10.255.0.10/32"),
                                  connection=Port(
                                      protocol=Protocol.TCP,
                                      string_representation="host1",
                                      from_port=3389,
                                      to_port=3389))
Code example #2
    def __init__(self, scope: Construct, id: str, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Stack", "Common-Ecs")

        self._cluster = Cluster(
            self,
            "Cluster",
            vpc=vpc,
        )

        asg = AutoScalingGroup(
            self,
            "ClusterASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.small"),
            machine_image=EcsOptimizedImage.amazon_linux2(),
            min_capacity=4,
        )
        self._cluster.add_auto_scaling_group(asg)

        # Create a SecurityGroup for the NLB to use when allowing traffic
        # from the NLB to us; a separate group avoids a cyclic dependency.
        self.security_group = SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=False,
        )

        # Only use "source_security_group" to check if flows come from ECS.
        # Do not use it to allow traffic in ECS; use "security_group" for
        # that.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.source_security_group = asg.node.children[0]

        # We could also create an additional security group and add it to
        # the ASG, but those keep adding up. Reusing this one makes it a
        # bit easier to get an overview of the allowed traffic in the
        # AWS console.
        self.source_security_group.add_ingress_rule(
            peer=self.security_group,
            connection=Port.tcp_range(32768, 65535),
            description="NLB-self to target",
        )
        self.source_security_group.add_ingress_rule(
            peer=self.security_group,
            connection=Port.udp_range(32768, 65535),
            description="NLB-self to target (UDP)",
        )
Code example #3
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ComputeTierProps, **kwargs):
        """
        Initializes a new instance of ComputeTier
        :param scope: The Scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties of this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        self.health_monitor = HealthMonitor(
            self,
            'HealthMonitor',
            vpc=props.vpc,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False)

        self.worker_fleet = WorkerInstanceFleet(
            self,
            'WorkerFleet',
            vpc=props.vpc,
            render_queue=props.render_queue,
            worker_machine_image=props.worker_machine_image,
            health_monitor=self.health_monitor,
            key_name=props.key_pair_name,
        )

        if props.bastion:
            self.worker_fleet.connections.allow_from(props.bastion,
                                                     Port.tcp(22))
Code example #4
File: cdkgoat_stack.py Project: bbhunt-2020/Canivete
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, 'vpc1')

        bucket_name = 'my-cdk-bucket'
        s3.Bucket(self,
                  bucket_name,
                  bucket_name=bucket_name,
                  access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
                  removal_policy=RemovalPolicy.DESTROY)

        ec2.Volume(self, 'vol1', availability_zone='us-east-1a', size=core.Size.gibibytes(8))

        sg = ec2.SecurityGroup(self,
                               'sg1',
                               vpc=vpc)
        sg.add_ingress_rule(Peer.any_ipv4(), Port.tcp(22))

        kms.Key(self, 'kms1')

        rds.DatabaseInstance(self,
                             'rds1',
                             engine=rds.DatabaseInstanceEngine.postgres(version=PostgresEngineVersion.VER_12),
                             master_username='******',
                             vpc=vpc,
                             vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
Code example #5
File: nlb_self.py Project: frosch123/aws-infra
    def add_nlb(self, scope: Construct, service: IEc2Service, port: Port,
                subdomain_name: str, description: str) -> None:
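        # Tag the service with its NLB protocol and port so they can be
        # discovered elsewhere in this project.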
        port_dict = port.to_rule_json()
        Tags.of(service).add("NLB-protocol", port_dict["ipProtocol"])
        Tags.of(service).add("NLB-port", str(port_dict["fromPort"]))

        self.create_alias(scope, subdomain_name)

        self.security_group.add_ingress_rule(
            peer=Peer.any_ipv6(),
            connection=port,
            description=f"{description} (IPv6)")
        self.security_group.add_ingress_rule(
            peer=Peer.any_ipv4(),
            connection=port,
            description=f"{description} (IPv4)")
Code example #6
    def __init__(self, scope: Construct, stack_id: str, *, props: ComputeTierProps, **kwargs):
        """
        Initializes a new instance of ComputeTier
        :param scope: The Scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties of this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        self.health_monitor = HealthMonitor(
            self,
            'HealthMonitor',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.INFRASTRUCTURE.name
            ),
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False
        )

        self.worker_fleet = WorkerInstanceFleet(
            self,
            'WorkerFleet',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.WORKERS.name
            ),
            render_queue=props.render_queue,
            worker_machine_image=props.worker_machine_image,
            health_monitor=self.health_monitor,
            key_name=props.key_pair_name,
            user_data_provider=UserDataProvider(self, 'UserDataProvider')
        )

        # This optional feature enables the EC2 instances for use with AWS
        # Systems Manager Session Manager. The worker fleet instances aren't
        # reachable through a public subnet, so connecting to them directly
        # over SSH isn't easy.
        SessionManagerHelper.grant_permissions_to(self.worker_fleet)

        if props.usage_based_licensing and props.licenses:
            props.usage_based_licensing.grant_port_access(self.worker_fleet, props.licenses)

        if props.bastion:
            self.worker_fleet.connections.allow_from(props.bastion, Port.tcp(22))
Code example #7
 def get_web_security_group(self, vpc):
     security_group = SecurityGroup(
         self._stack,
         'obm_web',
         vpc=vpc,
         allow_all_outbound=True,
     )
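     # Open SSH, HTTP and HTTPS to the world over both IPv4 and IPv6.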
     for port_number in [SSH_PORT, HTTP_PORT, HTTPS_PORT]:
         port = Port(from_port=port_number,
                     to_port=port_number,
                     protocol=Protocol.TCP,
                     string_representation=f"Port {port_number}")
         security_group.add_ingress_rule(peer=Peer.any_ipv4(),
                                         connection=port)
         security_group.add_ingress_rule(peer=Peer.any_ipv6(),
                                         connection=port)
     self._tag_it(security_group)
     return security_group
Code example #8
    def create_sg(self, jump_host, mgmt_ports):
        # Create a Security Group within the VPC that is used to allow
        # management traffic from designated jump hosts.
        self._sg = SecurityGroup(
            self,
            "MySG",
            vpc=self._vpc,
            allow_all_outbound=False,
            description="Management traffic from jump boxes",
            security_group_name="jumpbox-mgmt-traffic")

        # Add ingress rules to the Security Group
        for port in mgmt_ports:
            self._sg.add_ingress_rule(peer=Peer.ipv4(jump_host),
                                      connection=Port(
                                          protocol=Protocol.TCP,
                                          string_representation="jump",
                                          from_port=int(port),
                                          to_port=int(port)))
Code example #9
File: __init__.py Project: febus982/cdk-platform
    def _enable_cross_fleet_communication(self,
                                          fleets: List[AutoScalingGroup]):
        security_groups: List[SecurityGroup] = [
            fleet.node.find_child("InstanceSecurityGroup") for fleet in fleets
        ]
        security_groups = list(set(security_groups))  # deduplication
        # This is horrible, but we can't actually specify a common security
        # group for all the ASGs, like managed nodes have. We could add an
        # additional common security group, but that would break services
        # of type `LoadBalancer`.
        for sg_target in security_groups:
            for sg_source in security_groups:
                rule_found = False
                for rule, value in sg_target.to_ingress_rule_config().items():
                    if rule == "sourceSecurityGroupId" and value == sg_source.security_group_id:
                        rule_found = True

                if not rule_found:
                    sg_target.connections.allow_from(sg_source,
                                                     Port.all_traffic())
Code example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, 'vpc1')

        bucket_name = 'my-cdk-bucket'
        s3.Bucket(self,
                  bucket_name,
                  bucket_name=bucket_name,
                  access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
                  removal_policy=RemovalPolicy.DESTROY)

        ec2.Volume(self,
                   'vol1',
                   availability_zone='us-east-1a',
                   size=core.Size.gibibytes(8))

        sg = ec2.SecurityGroup(self, 'sg1', vpc=vpc)
        sg.add_ingress_rule(Peer.any_ipv4(), Port.tcp(22))

        kms.Key(self, 'kms1')
Code example #11
 def __attach_etcd_server_access_rule(self):
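     # etcd serves its client API on TCP/2379 and its peer API on TCP/2380;
     # keep that traffic internal to the master security group.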
     etcd_server_access_port = Port.tcp_range(start_port=2379,
                                              end_port=2380)
     self._master_security_group.connections.allow_internally(
         port_range=etcd_server_access_port)
Code example #12
 def __attach_api_server_access_rule(self):
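     # The Kubernetes API server listens on TCP/6443; open it to any IPv4 address.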
     api_server_access_port = Port.tcp(port=6443)
     self._master_security_group.connections.allow_from_any_ipv4(
         port_range=api_server_access_port)
Code example #13
 def execute(self):
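     # Allow SSH (TCP/22) to the manager from any IPv4 address.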
     self._manager_security_group.connections.allow_from_any_ipv4(
         port_range=Port.tcp(22))
Code example #14
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create two VPCs - one to host our private website, the other to act as a client
        website_vpc = Vpc(
            self,
            "WEBSITEVPC",
            cidr="10.0.0.0/16",
        )
        client_vpc = Vpc(
            self,
            "ClientVPC",
            cidr="10.1.0.0/16",
        )

        # Create a bastion host in the client VPC which will act as our client workstation
        bastion = BastionHostLinux(
            self,
            "WEBClient",
            vpc=client_vpc,
            instance_name='my-bastion',
            instance_type=InstanceType('t3.micro'),
            machine_image=AmazonLinuxImage(),
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
            security_group=SecurityGroup(
                scope=self,
                id='bastion-sg',
                security_group_name='bastion-sg',
                description='Security group for the bastion; no inbound '
                'ports are open because we access the bastion via AWS SSM',
                vpc=client_vpc,
                allow_all_outbound=True))

        # Set up a VPC peering connection between the client and website VPCs,
        # and adjust the routing tables to allow connections back and forth
        VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

        # Create VPC endpoints for API gateway
        vpc_endpoint = InterfaceVpcEndpoint(
            self,
            'APIGWVpcEndpoint',
            vpc=website_vpc,
            service=InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
        )
        vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
        endpoint_id = vpc_endpoint.vpc_endpoint_id

        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.DENY,
                                conditions={
                                    "StringNotEquals": {
                                        "aws:SourceVpce": endpoint_id
                                    }
                                }),
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.ALLOW)
        ])

        # Create an s3 bucket to hold the content
        content_bucket = s3.Bucket(self,
                                   "ContentBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Upload our static content to the bucket
        s3dep.BucketDeployment(self,
                               "DeployWithInvalidation",
                               sources=[s3dep.Source.asset('website')],
                               destination_bucket=content_bucket)

        # Create a private API GW in the API VPC
        api = apigw.RestApi(self,
                            'PrivateS3Api',
                            endpoint_configuration=apigw.EndpointConfiguration(
                                types=[apigw.EndpointType.PRIVATE],
                                vpc_endpoints=[vpc_endpoint]),
                            policy=api_policy)

        # Create a role to allow API GW to access our S3 bucket contents
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    content_bucket.bucket_arn,
                                    content_bucket.bucket_arn + '/*'
                                ],
                                actions=["s3:Get*"]))

        # Create a proxy resource that captures all non-root resource requests
        resource = api.root.add_resource("{proxy+}")
        # Create an integration with S3
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                request_parameters=
                {  # map the proxy parameter so we can pass the request path
                    "integration.request.path.proxy":
                    "method.request.path.proxy"
                },
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        resource.add_method(
            "GET",
            resource_integration,
            method_responses=[
                apigw.MethodResponse(status_code='200',
                                     response_parameters={
                                         "method.response.header.Content-Type":
                                         False
                                     })
            ],
            request_parameters={"method.request.path.proxy": True})
        # Handle requests to the root of our site
        # Create another integration with S3 - this time with no proxy parameter
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        api.root.add_method("GET",
                            resource_integration,
                            method_responses=[
                                apigw.MethodResponse(
                                    status_code='200',
                                    response_parameters={
                                        "method.response.header.Content-Type":
                                        False
                                    })
                            ])
Code example #15
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        role = iam.Role(
            scope=self,
            id='AwsCustomResourceRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        role.add_to_policy(
            iam.PolicyStatement(actions=['iam:PassRole'], resources=['*']))

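        # Custom resource backed by a Lambda that calls the S3 "listBuckets"
        # SDK action when the stack is created.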
        my_custom_resource = cr.AwsCustomResource(
            scope=self,
            id='MyAwsCustomResource',
            role=role,
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listBuckets',
                service='s3',
                physical_resource_id=cr.PhysicalResourceId.of('BucketsList'),
            ))

        vpc = VPCConstruct(self, id_='test-vpc', num_of_azs=2)
        security_group = SecurityGroup(
            self,
            id='test-security-group',
            vpc=vpc,
            security_group_name='test-security-group')
        security_group.add_ingress_rule(connection=Port.tcp(443),
                                        peer=vpc.lambdas_sg)

        domain = es.Domain(
            scope=self,
            id='Domain',
            version=es.ElasticsearchVersion.V7_9,
            domain_name="es-domain-name",
            enable_version_upgrade=False,
            enforce_https=True,
            fine_grained_access_control=None,
            node_to_node_encryption=True,
            tls_security_policy=es.TLSSecurityPolicy.TLS_1_0,
            logging=es.LoggingOptions(
                app_log_enabled=True,
                slow_index_log_enabled=True,
                slow_search_log_enabled=True,
                app_log_group=LogGroup(
                    scope=self,
                    id="app-log-group",
                    log_group_name='/aws/aes/domains/esdomain/app-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY),
                slow_index_log_group=LogGroup(
                    scope=self,
                    id="slow-index-log-group",
                    log_group_name='/aws/aes/domains/esdomain/slow-index-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY),
                slow_search_log_group=LogGroup(
                    scope=self,
                    id="slow-search-log-group",
                    log_group_name='/aws/aes/domains/esdomain/slow-search-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY)),
            removal_policy=core.RemovalPolicy.DESTROY,
            zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=2,
                                                  enabled=True),
            vpc_options=es.VpcOptions(
                security_groups=[security_group],
                subnets=vpc.audit_vpc.select_subnets(
                    subnet_group_name=PRIVATE_SUBNET_GROUP).subnets))
Code example #16
 def __attach_inter_worker_access_rule(self):
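     # Allow unrestricted traffic between worker nodes (e.g. pod-to-pod networking).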
     self._worker_security_group.connections.allow_from(
         other=self._worker_security_group.connections,
         port_range=Port.all_traffic())
Code example #17
 def __attach_api_server_control_rule(self):
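     # The kubelet API listens on TCP/10250; let the masters reach the workers on it.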
     api_server_control_port = Port.tcp(port=10250)
     self._worker_security_group.connections.allow_from(
         other=self._master_security_group.connections,
         port_range=api_server_control_port)
Code example #18
 def __attach_api_server_internal_access_rule(self):
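     # Keep TCP/10250-10252 (kubelet, kube-scheduler, kube-controller-manager)
     # internal to the master security group.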
     api_server_internal_port = Port.tcp_range(start_port=10250,
                                               end_port=10252)
     self._master_security_group.connections.allow_internally(
         port_range=api_server_internal_port)
Code example #19
File: app.py Project: disruptek/mindsdb-demo
    def __init__(self, app: App, id: str, env: Environment) -> None:
        super().__init__(app, id, env=env)

        # start by getting the DNS zone we're going to work with
        zone = HostedZone.from_lookup(self, "Dominick", domain_name=DOMAIN)

        # create a certificate for the web service which matches its hostname
        cert = Certificate(self,
                           "Cletus",
                           domain_name=HOSTNAME,
                           validation=CertificateValidation.from_dns(zone))

        # the services will live in a vpc, of course
        vpc = ec2.Vpc(self, "Virgil")

        # we're going to scale this web-service automatically
        asg = AutoScalingGroup(
            self,
            "Alice",
            vpc=vpc,
            user_data=http_service(),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2))

        # explicitly allow internal access from the vpc just to be safe
        asg.connections.allow_internally(Port.tcp(WEB_PORT), "web-service")
        asg.connections.allow_internally(Port.tcp(NOT_WEB), "not-web")

        # expose the scaling group ports to the world (ingress from any IPv4)
        asg.connections.allow_from_any_ipv4(Port.tcp(WEB_PORT))
        asg.connections.allow_from_any_ipv4(Port.tcp(NOT_WEB))

        # create a health check for the not-web service that currently
        if NOT_WEB_HEALTH_CHECKS:
            # points to the not-web service
            checker = HealthCheck(interval=Duration.seconds(10),
                                  port=str(NOT_WEB),
                                  protocol=Protocol.TCP)
        else:
            # points to the web port where our demo server listens
            checker = HealthCheck(interval=Duration.seconds(10),
                                  port=str(WEB_PORT),
                                  protocol=WEB_PROT)

        # put the scaling group behind a network target group for the LB
        notwebish = NetworkTargetGroup(self,
                                       "Allison",
                                       vpc=vpc,
                                       health_check=checker,
                                       targets=[asg],
                                       port=NOT_WEB,
                                       protocol=Protocol.TCP)

        # for the web-like ports, we can use the default health check
        webish = NetworkTargetGroup(
            self,
            "Alicen",
            vpc=vpc,
            health_check=HealthCheck(interval=Duration.seconds(10)),
            targets=[asg],
            port=WEB_PORT,
            protocol=WEB_PROT)

        if True:
            # create the load balancer and put it into dns
            lb = NetworkLoadBalancer(self,
                                     "Lisa",
                                     vpc=vpc,
                                     internet_facing=True)

            # create a hostname for the service
            CnameRecord(self,
                        "Carl",
                        domain_name=lb.load_balancer_dns_name,
                        zone=zone,
                        record_name=HOSTNAME.split('.')[0],
                        ttl=Duration.seconds(60))
        else:
            # a multi-step deployment could allow using an alias in R53
            lb = NetworkLoadBalancer.from_network_load_balancer_attributes(
                self,
                "Larry",
                vpc=vpc,
                load_balancer_arn=some.load_balancer_arn,
                load_balancer_dns_name=HOSTNAME,
                load_balancer_canonical_hosted_zone_id=zone.hosted_zone_id)

            # create a hostname for the service
            AaaaRecord(self,
                       "Eric",
                       zone=zone,
                       record_name=HOSTNAME.split('.')[0],
                       target=RecordTarget.from_alias(LoadBalancerTarget(lb)))

        # point the load balancer to the target group for the ssl service
        #
        # TODO: determine if we need to use the same cert for pub-facing
        #       and internal service
        listener_cert = ListenerCertificate(cert.certificate_arn)
        lb.add_listener("Cecil",
                        port=443,
                        certificates=[listener_cert],
                        default_target_groups=[webish])

        # point the load balancer to the target group for the web service
        lb.add_listener("Webster", port=80, default_target_groups=[webish])

        # point the load balancer to the group for the not-web service
        lb.add_listener("NotWeb",
                        default_target_groups=[notwebish],
                        port=NOT_WEB,
                        protocol=Protocol.TCP)

        # auto scale the, uh, autoscaling group
        asg.scale_on_cpu_utilization("ScaleCPU", target_utilization_percent=80)

        # emit some output values, largely for console use
        CfnOutput(self,
                  "LB",
                  export_name="LB",
                  value=lb.load_balancer_dns_name)
        CfnOutput(self,
                  "HTTP",
                  export_name="HTTP",
                  value="http://{}/".format(HOSTNAME))
        CfnOutput(self,
                  "HTTPS",
                  export_name="HTTPS",
                  value="https://{}/".format(HOSTNAME))
        CfnOutput(self,
                  "TCP",
                  export_name="TCP",
                  value="tcp://{}:{}/".format(HOSTNAME, NOT_WEB))
        CfnOutput(self, "Cert", export_name="Cert", value=cert.certificate_arn)
Code example #20
File: nlb_self.py Project: frosch123/aws-infra
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: ICluster,
        ecs_security_group: SecurityGroup,
        ecs_source_security_group: SecurityGroup,
        vpc: IVpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        global g_nlb

        Tags.of(self).add("Stack", "Common-Nlb")

        # TODO -- You need to do some manual actions:
        # TODO --  1) enable auto-assign IPv6 address on public subnets
        # TODO --  2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

        self.private_zone = HostedZone.from_lookup(
            self,
            "PrivateZone",
            domain_name="openttd.internal",
            private_zone=True,
        )

        user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

        asset = Asset(self, "NLB", path="user_data/nlb/")
        user_data.add_commands(
            "echo 'Extracting user-data files'",
            "mkdir /nlb",
            "cd /nlb",
        )
        user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key,
            local_file="/nlb/files.zip",
        )
        user_data.add_commands("unzip files.zip", )

        user_data.add_commands(
            "echo 'Setting up configuration'",
            f"echo '{self.region}' > /etc/.region",
            f"echo '{cluster.cluster_name}' > /etc/.cluster",
        )

        user_data.add_commands(
            "echo 'Installing nginx'",
            "amazon-linux-extras install epel",
            "yum install nginx -y",
            "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
            "mkdir /etc/nginx/nlb.d",
        )

        user_data.add_commands(
            "echo 'Installing Python3'",
            "yum install python3 -y",
            "python3 -m venv /venv",
            "/venv/bin/pip install -r /nlb/requirements.txt",
        )

        user_data.add_commands(
            "echo 'Generating nginx configuration'",
            "cd /etc/nginx/nlb.d",
            "/venv/bin/python /nlb/nginx.py",
            "systemctl start nginx",
        )

        user_data.add_commands(
            "echo 'Setting up SOCKS proxy'",
            "useradd pproxy",
            "cp /nlb/pproxy.service /etc/systemd/system/",
            "systemctl daemon-reload",
            "systemctl enable pproxy.service",
            "systemctl start pproxy.service",
        )

        asg = AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.nano"),
            machine_image=MachineImage.latest_amazon_linux(
                generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
            vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                        one_per_az=True),
            user_data=user_data,
            health_check=HealthCheck.elb(grace=Duration.seconds(0)),
        )
        asg.add_security_group(ecs_security_group)

        asg.role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        asset.grant_read(asg.role)
        policy = ManagedPolicy(self, "Policy")
        policy_statement = PolicyStatement(
            actions=[
                "ec2:DescribeInstances",
                "ecs:DescribeContainerInstances",
                "ecs:DescribeTasks",
                "ecs:ListContainerInstances",
                "ecs:ListServices",
                "ecs:ListTagsForResource",
                "ecs:ListTasks",
            ],
            resources=["*"],
        )
        policy.add_statements(policy_statement)
        asg.role.add_managed_policy(policy)

        # We could also create an additional security group and add it to
        # the ASG, but those keep adding up. Reusing this one makes it a
        # bit easier to get an overview of the allowed traffic in the
        # AWS console.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.security_group = asg.node.children[0]

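        # Note: "listener_https" is defined elsewhere in this project and is
        # not shown in this snippet.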
        listener_https.add_targets(
            subdomain_name=self.admin_subdomain_name,
            port=80,
            target=asg,
            priority=2,
        )

        # Create a Security Group so the Lambdas can reach the EC2 instances.
        # This is needed to check whether an EC2 instance is fully booted.
        lambda_security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )
        self.security_group.add_ingress_rule(
            peer=lambda_security_group,
            connection=Port.tcp(80),
            description="Lambda to target",
        )

        self.security_group.add_ingress_rule(
            peer=ecs_source_security_group,
            connection=Port.udp(8080),
            description="ECS to target",
        )

        self.create_ecs_lambda(
            cluster=cluster,
            auto_scaling_group=asg,
        )

        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
            timeout=Duration.seconds(180),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
            timeout=Duration.seconds(30),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )

        # Initialize the NLB record on localhost, as we need to be able to
        # reference it for other entries to work correctly.
        ARecord(
            self,
            "ARecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        AaaaRecord(
            self,
            "AAAARecord",
            target=RecordTarget.from_ip_addresses("::1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        # To make things a bit easier, also alias to staging.
        self.create_alias(self, "nlb.staging")

        # Create a record for the internal DNS
        ARecord(
            self,
            "APrivateRecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=self.private_zone,
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )

        if g_nlb is not None:
            raise Exception("Only a single NlbStack instance can exist")
        g_nlb = self
Code example #21
 def __attach_manager_full_access(self):
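     # Give the managers full access to the masters on all ports.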
     self._master_security_group.connections.allow_from(
         other=self._manager_security_group.connections,
         port_range=Port.all_traffic())
Code example #22
File: ecs_stack.py Project: DURGA555666/ECS
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        EcsStack.readConfig(0)
        vpc = ec.Vpc(
            self,
            "Main",
            cidr="11.0.0.0/26",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec.SubnetConfiguration(name="public",
                                       cidr_mask=28,
                                       subnet_type=ec.SubnetType.PUBLIC),
                ec.SubnetConfiguration(name="private",
                                       cidr_mask=28,
                                       subnet_type=ec.SubnetType.PRIVATE)
            ])

        cluster = ecs.Cluster(self, "TestingCluster", vpc=vpc)
        # defining the task IAM role
        taskRole = iam.Role(
            self,
            id="taskRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'),
                iam.ServicePrincipal(service='ec2.amazonaws.com')),
            role_name="webmaintaskRole",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonRDSFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonDynamoDBFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonRedshiftFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonKinesisFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSNSFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaRole"),
                iam.ManagedPolicy(self,
                                  id="ManagedPolicy",
                                  managed_policy_name="Grant_dev",
                                  statements=[
                                      iam.PolicyStatement(actions=[
                                          "kms:Decrypt",
                                          "secretemanager:GetSecreteValue"
                                      ],
                                                          resources=["*"])
                                  ])
            ])
        # taskRole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSFullAccess"))

        # WebApp Main task definition & service
        webmain_task_definition = ecs.FargateTaskDefinition(
            self,
            "WebAppMain",
            memory_limit_mib=512,
            cpu=256,
            task_role=taskRole,
            execution_role=taskRole)
        webmain_container = webmain_task_definition.add_container(
            "webapp-mainContainer",
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            environment=EcsStack.commands,
            docker_labels={
                "com.datadoghq.ad.instances":
                "[{\"host\": \"%%host%%\", \"port\": 80}]",
                "com.datadoghq.ad.check_names": "[\"ecs_fargate\"]",
                "com.datadoghq.ad.init_configs": "[{}]"
            },
            logging=ecs.LogDriver.aws_logs(stream_prefix="awslogs"))
        # Clearing the environment variables from the commands (Map)
        EcsStack.commands.clear()
        EcsStack.readConfig(1)
        webmain_datadog_container = webmain_task_definition.add_container(
            "webapp-main_datadog_Container",
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            environment=EcsStack.commands)

        webmain_port_mapping = ecs.PortMapping(container_port=80,
                                               host_port=80,
                                               protocol=ecs.Protocol.TCP)
        datadog_port_mapping1 = ecs.PortMapping(container_port=8126,
                                                host_port=8126,
                                                protocol=ecs.Protocol.TCP)
        datadog_port_mapping2 = ecs.PortMapping(container_port=8125,
                                                host_port=8125,
                                                protocol=ecs.Protocol.TCP)
        webmain_container.add_port_mappings(webmain_port_mapping)
        webmain_datadog_container.add_port_mappings(datadog_port_mapping1)
        webmain_datadog_container.add_port_mappings(datadog_port_mapping2)
        # Security group for service
        webmain_sg = ec.SecurityGroup(self,
                                      "webmain_sg",
                                      vpc=vpc,
                                      allow_all_outbound=True,
                                      security_group_name="WebAppMain")
        webmain_sg.add_ingress_rule(peer=Peer.ipv4("202.65.133.194/32"),
                                    connection=Port.tcp(5432))
        webmain_service = ecs.FargateService(
            self,
            "webapp-main",
            cluster=cluster,
            task_definition=webmain_task_definition,
            desired_count=1,
            security_group=webmain_sg)
        # defining the load balancer
        webmain_lb = elbv2.ApplicationLoadBalancer(
            self,
            "LB",
            vpc=vpc,
            internet_facing=True,
            load_balancer_name="WebAppMain",
            # security_group=
            vpc_subnets=ec.SubnetSelection(subnet_type=ec.SubnetType.PUBLIC))
        webmain_target_grp = elbv2.ApplicationTargetGroup(
            self,
            id="webapp-main-target",
            port=80,
            protocol=elbv2.ApplicationProtocol.HTTP,
            health_check=elbv2.HealthCheck(healthy_http_codes="200-399",
                                           healthy_threshold_count=2,
                                           unhealthy_threshold_count=2,
                                           port="traffic-port",
                                           protocol=elbv2.Protocol.HTTP,
                                           timeout=core.Duration.seconds(6),
                                           interval=core.Duration.seconds(10)),
            targets=[webmain_service],
            target_group_name="WebAppMain",
            target_type=elbv2.TargetType.IP,
            vpc=vpc)
        listener = webmain_lb.add_listener(
            "webMain_Listener",
            port=443,
            open=True,
            default_target_groups=[webmain_target_grp],
            certificate_arns=[
                "arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"
            ])
        listener2 = webmain_lb.add_listener(
            "webMain_Listener2",
            port=80,
            # default_target_groups=[webmain_target_grp]
        )

        # elbv2.ApplicationListenerCertificate(self,"WebAppMAin_Certificate",listener=listener,certificate_arns=["arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"])
        listener2.add_redirect_response(id="HttptoHttps",
                                        status_code="HTTP_301",
                                        port="443",
                                        protocol="HTTPS")
Code example #23
 def __attach_public_access_rule(self):
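     # NodePort services use TCP/30000-32767; expose the range to any IPv4 address.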
     public_access_port = Port.tcp_range(start_port=30000, end_port=32767)
     self._worker_security_group.connections.allow_from_any_ipv4(
         port_range=public_access_port)
Code example #24
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 policy: Policy, cluster: ICluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        policy.add_stack(self)

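        # Production and staging differ in task count, priority, master
        # server port, and DynamoDB table prefix.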
        if deployment == Deployment.PRODUCTION:
            desired_count = 2
            priority = 61
            master_port = 3978
            dynamodb_prefix = "P-"
        else:
            desired_count = 1
            priority = 161
            master_port = 4978
            dynamodb_prefix = "S-"

        sentry_dsn = parameter_store.add_secure_string(
            f"/MasterServer/{deployment.value}/SentryDSN").parameter

        self.container = ECSHTTPSContainer(
            self,
            self.application_name,
            subdomain_name=self.subdomain_name,
            deployment=deployment,
            policy=policy,
            application_name=self.application_name,
            image_name="ghcr.io/openttd/master-server",
            port=80,
            memory_limit_mib=64,
            desired_count=desired_count,
            cluster=cluster,
            priority=priority,
            command=[
                "--app",
                "master_server",
                "--bind",
                "0.0.0.0",
                "--msu-port",
                str(master_port),
                "--db",
                "dynamodb",
                "--dynamodb-region",
                self.region,
                "--dynamodb-prefix",
                dynamodb_prefix,
                "--proxy-protocol",
                "--socks-proxy",
                "socks5://nlb.openttd.internal:8080",
            ],
            environment={
                "MASTER_SERVER_SENTRY_ENVIRONMENT": deployment.value.lower(),
            },
            secrets={
                "MASTER_SERVER_SENTRY_DSN":
                Secret.from_ssm_parameter(sentry_dsn),
            },
        )

        table_and_index = []
        for table in ("S-MSU-ip-port", "S-MSU-server", "P-MSU-ip-port",
                      "P-MSU-server"):
            table_and_index.extend([
                f"arn:aws:dynamodb:{self.region}:{self.account}:table/{table}",
                f"arn:aws:dynamodb:{self.region}:{self.account}:table/{table}/index/online_view",
                f"arn:aws:dynamodb:{self.region}:{self.account}:table/{table}/index/time_last_seen_view",
            ])

        self.container.task_role.add_to_policy(
            PolicyStatement(
                actions=[
                    "dynamodb:CreateTable",
                    "dynamodb:UpdateTimeToLive",
                    "dynamodb:PutItem",
                    "dynamodb:DescribeTable",
                    "dynamodb:ListTables",
                    "dynamodb:GetItem",
                    "dynamodb:Query",
                    "dynamodb:UpdateItem",
                ],
                resources=table_and_index,
            ))

        self.container.add_udp_port(master_port)
        nlb.add_nlb(self, self.container.service, Port.udp(master_port),
                    self.nlb_subdomain_name, "Master Server")
Code example #25
    def __init__(self, scope: core.Construct, id: str, deploy_env: str,
                 vpc: aws_ec2.Vpc, db_redis_stack: RdsElasticacheEfsStack,
                 config: dict, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.config = config
        self.deploy_env = deploy_env
        self.db_port = DB_PORT
        # cannot map volumes to Fargate task defs yet - so this is done via Boto3 since CDK does not
        # support it yet: https://github.com/aws/containers-roadmap/issues/825
        #self.efs_file_system_id = db_redis_stack.efs_file_system_id
        cluster_name = get_cluster_name(deploy_env)
        self.cluster = ecs.Cluster(self,
                                   cluster_name,
                                   cluster_name=cluster_name,
                                   vpc=vpc)
        pwd_secret = ecs.Secret.from_ssm_parameter(
            StringParameter.from_secure_string_parameter_attributes(
                self,
                f"dbpwd-{deploy_env}",
                version=1,
                parameter_name="postgres_pwd"))
        self.secrets = {"POSTGRES_PASSWORD": pwd_secret}
        environment = {
            "EXECUTOR": "Celery",
            "POSTGRES_HOST": db_redis_stack.db_host,
            "POSTGRES_PORT": str(self.db_port),
            "POSTGRES_DB": "airflow",
            "POSTGRES_USER": self.config["dbadmin"],
            "REDIS_HOST": db_redis_stack.redis_host,
            "VISIBILITY_TIMEOUT": str(self.config["celery_broker_visibility_timeout"])
        }
        image_asset = DockerImageAsset(self,
                                       "AirflowImage",
                                       directory="build",
                                       repository_name=config["ecr_repo_name"])
        self.image = ecs.ContainerImage.from_docker_image_asset(image_asset)
        # web server - this initializes the db so must happen first
        self.web_service = self.airflow_web_service(environment)
        # https://github.com/aws/aws-cdk/issues/1654
        self.web_service_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        redis_port_info = Port(protocol=Protocol.TCP,
                               string_representation="allow to redis",
                               from_port=REDIS_PORT,
                               to_port=REDIS_PORT)
        worker_port_info = Port(protocol=Protocol.TCP,
                                string_representation="allow to worker",
                                from_port=AIRFLOW_WORKER_PORT,
                                to_port=AIRFLOW_WORKER_PORT)
        redis_sg = SecurityGroup.from_security_group_id(
            self,
            id=f"Redis-SG-{deploy_env}",
            security_group_id=db_redis_stack.redis.vpc_security_group_ids[0])
        bastion_sg = db_redis_stack.bastion.connections.security_groups[0]
        self.web_service_sg().connections.allow_to(redis_sg, redis_port_info,
                                                   'allow Redis')
        self.web_service_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)
        # scheduler
        self.scheduler_service = self.create_scheduler_ecs_service(environment)
        # worker
        self.worker_service = self.worker_service(environment)
        self.scheduler_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        self.scheduler_sg().connections.allow_to(redis_sg, redis_port_info,
                                                 'allow Redis')
        self.scheduler_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)

        self.worker_sg().connections.allow_to_default_port(
            db_redis_stack.postgres_db, 'allow PG')
        self.worker_sg().connections.allow_to(redis_sg, redis_port_info,
                                              'allow Redis')
        self.worker_sg().connections.allow_to_default_port(
            db_redis_stack.efs_file_system)
        # When you start an Airflow worker, Airflow starts a tiny web server
        # subprocess to serve the worker's local log files to the main
        # web server, which then builds pages and sends them to users. This
        # defines the port on which the logs are served. It needs to be
        # unused and reachable from the main web server so it can connect
        # to the workers.
        self.web_service_sg().connections.allow_to(self.worker_sg(),
                                                   worker_port_info,
                                                   'web service to worker')
Code example #26
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )

        neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'HTTPS from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]", NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)

        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,  # Encrypt the root volume at rest
                        delete_on_termination=True
                    )
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True  # Assumption: no primary data here; the volume holds only Altimeter results.
                    )
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")
Code example #27
0
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: Deployment,
        policy: Policy,
        cluster: ICluster,
        bucket: Bucket,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        policy.add_stack(self)

        if deployment == Deployment.PRODUCTION:
            desired_count = 2
            priority = 44
            memory = 256
            github_url = "https://github.com/OpenTTD/BaNaNaS"
            content_port = 3978
            bootstrap_command = ["--bootstrap-unique-id", "4f474658"]
        else:
            desired_count = 1
            priority = 144
            memory = 128
            github_url = "https://github.com/OpenTTD/BaNaNaS-staging"
            content_port = 4978
            bootstrap_command = []

        cdn_fqdn = dns.subdomain_to_fqdn("bananas.cdn")
        cdn_url = f"http://{cdn_fqdn}"

        sentry_dsn = parameter_store.add_secure_string(f"/BananasServer/{deployment.value}/SentryDSN").parameter
        reload_secret = parameter_store.add_secure_string(f"/BananasServer/{deployment.value}/ReloadSecret").parameter

        command = [
            "--storage",
            "s3",
            "--storage-s3-bucket",
            bucket.bucket_name,
            "--index",
            "github",
            "--index-github-url",
            github_url,
            "--cdn-url",
            cdn_url,
            "--bind",
            "0.0.0.0",
            "--content-port",
            str(content_port),
            "--proxy-protocol",
        ]
        command.extend(bootstrap_command)

        self.container = ECSHTTPSContainer(
            self,
            self.application_name,
            subdomain_name=self.subdomain_name,
            path_pattern=self.path_pattern,
            allow_via_http=True,
            deployment=deployment,
            policy=policy,
            application_name=self.application_name,
            image_name="ghcr.io/openttd/bananas-server",
            port=80,
            memory_limit_mib=memory,
            desired_count=desired_count,
            cluster=cluster,
            priority=priority,
            command=command,
            environment={
                "BANANAS_SERVER_SENTRY_ENVIRONMENT": deployment.value.lower(),
            },
            secrets={
                "BANANAS_SERVER_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
                "BANANAS_SERVER_RELOAD_SECRET": Secret.from_ssm_parameter(reload_secret),
            },
        )

        self.container.add_port(content_port)
        nlb.add_nlb(self, self.container.service, Port.tcp(content_port), self.nlb_subdomain_name, "BaNaNaS Server")

        self.container.task_role.add_to_policy(
            PolicyStatement(
                actions=[
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    bucket.bucket_arn,
                    StringConcat().join(bucket.bucket_arn, "/*"),
                ],
            )
        )
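
The policy above assembles the object ARN by hand with StringConcat; aws_s3.Bucket exposes arn_for_objects as an equivalent helper. A minimal sketch, reusing the bucket and container from this snippet:

    self.container.task_role.add_to_policy(
        PolicyStatement(
            actions=["s3:GetObject", "s3:ListBucket"],
            # arn_for_objects("*") resolves to "<bucket-arn>/*"
            resources=[bucket.bucket_arn, bucket.arn_for_objects("*")],
        )
    )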
Code example #28
0
File: stack.py Project: schemadesign/schema_cms
    def __init__(
        self,
        scope: App,
        id: str,
        envs: EnvSettings,
        components: ComponentsStack,
        base_resources: BaseResources,
    ):
        super().__init__(scope, id)

        self.db_secret_arn = Fn.import_value(
            BaseResources.get_database_secret_arn_output_export_name(envs))

        self.job_processing_queues = components.data_processing_queues
        self.vpc = base_resources.vpc
        self.db = base_resources.db

        self.app_bucket = Bucket(self, "App", versioned=True)

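        # Note: bucket_arn resolves to a CloudFormation token at synth time,
        # so this guard is always truthy (the same holds for the secret_arn
        # check below); the CfnOutput is created unconditionally.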
        if self.app_bucket.bucket_arn:
            CfnOutput(
                self,
                id="AppBucketOutput",
                export_name=self.get_app_bucket_arn_output_export_name(envs),
                value=self.app_bucket.bucket_arn,
            )

        self.pages_bucket = Bucket(self, "Pages", public_read_access=True)

        self.domain_name = StringParameter.from_string_parameter_name(
            self,
            "DomainNameParameter",
            string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value

        self.certificate_arn = StringParameter.from_string_parameter_name(
            self,
            "CertificateArnParameter",
            string_parameter_name="/schema-cms-app/CERTIFICATE_ARN"
        ).string_value

        django_secret = Secret(self,
                               "DjangoSecretKey",
                               secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
        lambda_auth_token_secret = Secret(
            self,
            "LambdaAuthToken",
            secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")

        if lambda_auth_token_secret.secret_arn:
            CfnOutput(
                self,
                id="lambdaAuthTokenArnOutput",
                export_name=self.get_lambda_auth_token_arn_output_export_name(
                    envs),
                value=lambda_auth_token_secret.secret_arn,
            )

        self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
        self.lambda_auth_token = EcsSecret.from_secrets_manager(
            lambda_auth_token_secret)

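        # Assumption: the deploy pipeline passes "-c app_image_tag=undefined"
        # when no explicit tag is set; tag=None then falls back to the
        # repository default ("latest") in ContainerImage.from_ecr_repository.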
        tag_from_context = self.node.try_get_context("app_image_tag")
        tag = tag_from_context if tag_from_context != "undefined" else None

        api_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="BackendRepository",
                repository_name=BaseECR.get_backend_repository_name(envs)),
            tag=tag,
        )
        nginx_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="NginxRepository",
                repository_name=BaseECR.get_nginx_repository_name(envs)),
            tag=tag,
        )

        self.api = ApplicationLoadBalancedFargateService(
            self,
            "ApiService",
            service_name=f"{envs.project_name}-api-service",
            cluster=Cluster.from_cluster_attributes(
                self,
                id="WorkersCluster",
                cluster_name="schema-ecs-cluster",
                vpc=self.vpc,
                security_groups=[],
            ),
            task_image_options=ApplicationLoadBalancedTaskImageOptions(
                image=nginx_image,
                container_name="nginx",
                container_port=80,
                enable_logging=True,
            ),
            desired_count=1,
            cpu=512,
            memory_limit_mib=1024,
            certificate=Certificate.from_certificate_arn(
                self, "Cert", certificate_arn=self.certificate_arn),
            domain_name=self.domain_name,
            domain_zone=PrivateHostedZone(
                self,
                "zone",
                vpc=self.vpc,
                zone_name=self.domain_name,
            ),
        )

        self.api.task_definition.add_container(
            "backend",
            image=api_image,
            command=[
                "sh", "-c",
                "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
            ],
            logging=AwsLogDriver(stream_prefix="backend-container"),
            environment={
                "POSTGRES_DB": envs.data_base_name,
                "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
                "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
                "SQS_WORKER_QUEUE_URL":
                self.job_processing_queues[0].queue_url,
                "SQS_WORKER_EXT_QUEUE_URL":
                self.job_processing_queues[1].queue_url,
                "SQS_WORKER_MAX_QUEUE_URL":
                self.job_processing_queues[2].queue_url,
                "CHAMBER_SERVICE_NAME": "schema-cms-app",
                "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
            },
            secrets={
                "DB_CONNECTION":
                EcsSecret.from_secrets_manager(
                    Secret.from_secret_arn(self,
                                           id="DbSecret",
                                           secret_arn=self.db_secret_arn)),
                "DJANGO_SECRET_KEY":
                self.django_secret_key,
                "LAMBDA_AUTH_TOKEN":
                self.lambda_auth_token,
            },
            cpu=512,
            memory_limit_mib=1024,
        )

        self.django_secret_key.grant_read(
            self.api.service.task_definition.task_role)

        self.app_bucket.grant_read_write(
            self.api.service.task_definition.task_role)
        self.pages_bucket.grant_read_write(
            self.api.service.task_definition.task_role)

        for queue in self.job_processing_queues:
            queue.grant_send_messages(
                self.api.service.task_definition.task_role)

        self.api.service.connections.allow_to(self.db.connections,
                                              Port.tcp(5432))
        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
                resources=["*"],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=[
                    "kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"
                ],
                resources=[
                    Fn.import_value(
                        BaseKMS.get_kms_arn_output_export_name(envs))
                ],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(actions=["ssm:DescribeParameters"],
                            resources=["*"]))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ssm:GetParameters*"],
                resources=[
                    f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
                ],
            ))