Example #1
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

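        # Tags.of(self) applies to the whole construct tree, so these tags
        # propagate to every taggable resource in the stack.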
        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            f"Preview{deployment.value}IndexRedirect",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/preview-redirect"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
        )

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
        )
Example #2
    def get_cluster(self, vpc):
        cluster = Cluster(self._stack, self._name, vpc=vpc)
        Tags.of(cluster).add('hostedZoneId', self._config.hosted_zone_id)
        Tags.of(cluster).add('domain', self._config.domain)
        self._export('ClusterName', cluster.cluster_name)
        self._tag_it(cluster)
        return cluster
Example #3
def add_tags_to_stack(stack: Stack, config: Dict) -> None:
    # Add common tags
    for tag_key in config['tags']:
        Tags.of(stack).add(key=tag_key, value=config['tags'][tag_key])

    # Add environment in the tags
    Tags.of(stack).add(key='stage', value=config['stage'])
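A minimal usage sketch for the helper above (the app, stack name, tag values, and stage are hypothetical; assumes CDK v1's aws_cdk.core):

from aws_cdk.core import App, Stack

app = App()
stack = Stack(app, "tagging-demo")  # hypothetical stack name
add_tags_to_stack(stack, {
    "tags": {"team": "platform", "cost-centre": "1234"},  # hypothetical tag set
    "stage": "dev",
})
app.synth()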
Example #4
    def __init__(self, scope: Construct, id: str, context: "Context",
                 team_context: "TeamContext", parameters: Dict[str, Any]) -> None:

        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=context.account_id, region=context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{context.name}")

        if team_context.eks_pod_role_arn is None:
            raise ValueError("Pod Role arn required")
        team_role = iam.Role.from_role_arn(
            scope=self,
            id="team-role",
            role_arn=team_context.eks_pod_role_arn,
            mutable=True)
        team_role.attach_inline_policy(policy=iam.Policy(
            scope=self,
            id="emr_on_eks",
            policy_name="emr_on_eks",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "emr-containers:StartJobRun",
                        "emr-containers:ListJobRuns",
                        "emr-containers:DescribeJobRun",
                        "emr-containers:CancelJobRun",
                        "emr-containers:TagResource",
                    ],
                    resources=[parameters.get("virtual_arn", "*")],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "logs:*",
                    ],
                    resources=[
                        f"arn:aws:logs:{context.region}:{context.account_id}:log-group:/orbit/emr/*",
                        f"arn:aws:logs:{context.region}:{context.account_id}:log-group:/orbit/emr/*:log-stream:*",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "emr-containers:Get*",
                        "emr-containers:Describe*",
                        "emr-containers:List*",
                        "elasticmapreduce:CreatePersistentAppUI",
                        "elasticmapreduce:DescribePersistentAppUI",
                        "elasticmapreduce:GetPersistentAppUIPresignedURL",
                    ],
                    resources=["*"],
                ),
            ],
        ))
Example #5
    def __init__(self, scope: Construct, stack_id: str, deploy_env: str,
                 **kwargs: Any) -> None:
        super().__init__(scope, stack_id, **kwargs)

        ############################################################################################
        # ### NETWORKING ###########################################################################
        ############################################################################################

        # create new VPC
        aws_ec2.Vpc(
            self,
            "datalake",
            # cidr='10.0.0.0/16',  # TODO: use specific CIDR
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    cidr_mask=27,
                    name="public",
                    subnet_type=aws_ec2.SubnetType.PUBLIC),
                aws_ec2.SubnetConfiguration(
                    cidr_mask=20,
                    name="ecs-cluster",
                    subnet_type=aws_ec2.SubnetType.PRIVATE),
                aws_ec2.SubnetConfiguration(
                    name="reserved",
                    subnet_type=aws_ec2.SubnetType.PRIVATE,
                    reserved=True,
                ),
            ],
            max_azs=99 if deploy_env == "prod" else 1,
        )

        Tags.of(self).add("ApplicationLayer", "networking")  # type: ignore[arg-type]
Example #6
    def __init__(
        self,
        scope: Construct,
        id: str,
        context: "Context",
        team_context: "TeamContext",
        parameters: Dict[str, Any],
    ) -> None:

        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=context.account_id, region=context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{context.name}")
        _logger.info(f"Plugin parameters: {parameters}")
        # Just showing how to create a resource. Remember to update the IAM policy, or make sure
        # the policy attached to the team allows creating and destroying this resource.
        ssm_parameter: str = f"/orbit/{context.name}/{team_context.name}/hello-plugin"
        ssm.StringParameter(
            scope=self,
            id="param",
            string_value="testing plugin hello world",
            parameter_name=ssm_parameter,
        )
Example #7
    def __init__(self, scope: Construct, stack_id: str, env_name: str) -> None:
        super().__init__(scope, stack_id)

        ############################################################################################
        # ### NETWORKING ###########################################################################
        ############################################################################################

        # create new VPC
        aws_ec2.Vpc(
            self,
            "geostore",
            # cidr='10.0.0.0/16',  # TODO: use specific CIDR
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    cidr_mask=27, name="public", subnet_type=aws_ec2.SubnetType.PUBLIC
                ),
                aws_ec2.SubnetConfiguration(
                    cidr_mask=20, name="ecs-cluster", subnet_type=aws_ec2.SubnetType.PRIVATE
                ),
                aws_ec2.SubnetConfiguration(
                    name="reserved",
                    subnet_type=aws_ec2.SubnetType.PRIVATE,
                    reserved=True,
                ),
            ],
            max_azs=99 if env_name == PRODUCTION_ENVIRONMENT_NAME else 1,
        )

        Tags.of(self).add("ApplicationLayer", "networking")  # type: ignore[arg-type]
Example #8
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 policy: Policy, cluster: ICluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        policy.add_stack(self)

        if deployment == Deployment.PRODUCTION:
            desired_count = 2
            priority = 50
        else:
            desired_count = 1
            priority = 150

        ECSHTTPSContainer(
            self,
            self.application_name,
            subdomain_name=self.subdomain_name,
            deployment=deployment,
            policy=policy,
            application_name=self.application_name,
            image_name="ghcr.io/openttd/binaries-redirect",
            port=80,
            memory_limit_mib=16,
            desired_count=desired_count,
            cluster=cluster,
            priority=priority,
            allow_via_http=True,
        )
Example #9
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        deploy_env: str,
        storage_bucket: aws_s3.Bucket,
        **kwargs: Any,
    ) -> None:
        super().__init__(scope, stack_id, **kwargs)

        account_principal = aws_iam.AccountPrincipal(account_id=276514628126)
        role = aws_iam.Role(
            self,
            "koordinates-read-role",
            role_name=f"koordinates-s3-access-read-{deploy_env}",
            assumed_by=account_principal,  # type: ignore[arg-type]
            external_id={"prod": "koordinates-jAddR"}.get(deploy_env, "koordinates-4BnJQ"),
            max_session_duration=MAX_SESSION_DURATION,
        )
        storage_bucket.grant_read(role)  # type: ignore[arg-type]

        Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]
Example #10
    def __init__(self, scope: Construct, stack_id: str, *, deploy_env: str,
                 **kwargs: Any) -> None:
        super().__init__(scope, stack_id, **kwargs)

        ############################################################################################
        # ### DATASET STAGING S3 BUCKET ############################################################
        ############################################################################################
        self.staging_bucket = aws_s3.Bucket(
            self,
            "dataset-staging-bucket",
            access_control=aws_s3.BucketAccessControl.PRIVATE,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            removal_policy=RemovalPolicy.DESTROY,
        )

        self.staging_bucket_name_parameter = aws_ssm.StringParameter(
            self,
            "staging bucket name",
            description=f"Staging bucket name for {deploy_env}",
            parameter_name=ParameterName.STAGING_BUCKET_NAME.value,
            string_value=self.staging_bucket.bucket_name,
        )

        Tags.of(self).add("ApplicationLayer", "staging")  # type: ignore[arg-type]
Example #11
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

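        # A single site bucket and access-log bucket are shared by all the
        # subdomain distributions created below.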
        bucket_site = Bucket(
            self,
            "Site",
            block_public_access=BlockPublicAccess.BLOCK_ALL,
        )

        bucket_access_logs = Bucket(
            self,
            "AccessLogs",
            encryption=BucketEncryption.S3_MANAGED,
            block_public_access=BlockPublicAccess.BLOCK_ALL,
        )

        for subdomain_name in self.subdomain_names:
            func_version = lambda_edge.create_function(
                self,
                f"Redirect-{subdomain_name}-{deployment.value}",
                runtime=Runtime.NODEJS_10_X,
                handler="index.handler",
                code=Code.from_asset(f"./lambdas/redirect-{subdomain_name}"),
            )

            if subdomain_name == "grfsearch":
                S3CloudFrontV2(
                    self,
                    f"S3CloudFront-{subdomain_name}",
                    subdomain_name=subdomain_name,
                    bucket_site=bucket_site,
                    bucket_access_logs=bucket_access_logs,
                    edge_lambdas=[
                        EdgeLambda(
                            event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                            function_version=func_version,
                        ),
                    ],
                    forward_query_string=True,
                    forward_query_string_cache_keys=["do", "q"],
                )
            else:
                S3CloudFront(
                    self,
                    f"S3CloudFront-{subdomain_name}",
                    subdomain_name=subdomain_name,
                    bucket_site=bucket_site,
                    bucket_access_logs=bucket_access_logs,
                    lambda_function_associations=[
                        LambdaFunctionAssociation(
                            event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                            lambda_function=func_version,
                        ),
                    ],
                )
Example #12
    def _create_cluster_pod_security_group(self) -> ec2.SecurityGroup:
        name = f"orbit-{self.context.name}-cluster-pod-sg"
        sg = ec2.SecurityGroup(
            scope=self,
            id="cluster-pod-security-group",
            security_group_name=name,
            vpc=self.i_vpc,
        )
        Tags.of(scope=sg).add(key="Name", value=name)
        return sg
Example #13
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

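        # A module-level global is used to enforce that only one TasksStack exists per app.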
        global g_tasks

        Tags.of(self).add("Stack", "Common-Tasks")

        if g_tasks is not None:
            raise Exception("Only a single TasksStack instance can exist")
        g_tasks = self
Example #14
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 policy: Policy, cluster: ICluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        policy.add_stack(self)

        if deployment == Deployment.PRODUCTION:
            desired_count = 1  # Currently this pod is stateful, and as such cannot be run more than once
            priority = 46
        else:
            desired_count = 1
            priority = 146

        api_fqdn = dns.subdomain_to_fqdn("api.bananas")
        api_url = f"https://{api_fqdn}"
        frontend_fqdn = dns.subdomain_to_fqdn(self.subdomain_name)
        frontend_url = f"https://{frontend_fqdn}"

        sentry_dsn = parameter_store.add_secure_string(
            f"/BananasFrontendWeb/{deployment.value}/SentryDSN").parameter

        ECSHTTPSContainer(
            self,
            self.application_name,
            subdomain_name=self.subdomain_name,
            deployment=deployment,
            policy=policy,
            application_name=self.application_name,
            image_name="ghcr.io/openttd/bananas-frontend-web",
            port=80,
            memory_limit_mib=64,
            desired_count=desired_count,
            cluster=cluster,
            priority=priority,
            command=[
                "--api-url",
                api_url,
                "--frontend-url",
                frontend_url,
                "run",
                "-p",
                "80",
                "-h",
                "0.0.0.0",
            ],
            environment={
                "WEBCLIENT_SENTRY_ENVIRONMENT": deployment.value.lower(),
            },
            secrets={
                "WEBCLIENT_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
            },
        )
Example #15
    def __init__(self, scope: Construct, id: str, *,
                 alb: IApplicationLoadBalancer, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        global g_listener_https

        Tags.of(self).add("Stack", "Common-Listener-Https")

        self._used_priorities = []
        self._subdomains_cert = {}

        self._alb = alb
        self._listener = ApplicationListener(
            self,
            "Listener-Https",
            load_balancer=alb,
            port=443,
            protocol=ApplicationProtocol.HTTPS,
        )
        # By default, only IPv4 is added to allowed connections
        self._listener.connections.allow_default_port_from(
            other=Peer.any_ipv6(),
            description="Allow from anyone on port 443",
        )
        # Make sure a backend always answers, even if we don't know the host
        self._listener.add_fixed_response(
            "default",
            status_code="404",
            message_body="Page not found",
        )

        # Add a redirect; in case people go to HTTP, redirect them to HTTPS.
        self._http_listener = ApplicationListener(
            self,
            "Listener-Http",
            load_balancer=alb,
            port=80,
            protocol=ApplicationProtocol.HTTP,
        )
        self._http_listener.connections.allow_default_port_from(
            other=Peer.any_ipv6(),
            description="Allow from anyone on port 80",
        )
        self._http_listener.add_redirect_response(
            "Http-To-Https",
            status_code="HTTP_301",
            port="443",
            protocol="HTTPS",
        )

        if g_listener_https is not None:
            raise Exception(
                "Only a single ListenerHTTPSStack instance can exist")
        g_listener_https = self
Example #16
    def __init__(
        self,
        scope: Construct,
        id: str,
        context: "Context",
    ) -> None:
        self.scope = scope
        self.id = id
        self.context = context
        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=self.context.account_id,
                            region=self.context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{self.context.name}")
        if self.context.networking.vpc_id is None:
            raise ValueError("self.context.networking.vpc_id is None.")
        if self.context.networking.availability_zones is None:
            raise ValueError(
                "self.context.networking.availability_zones is None.")

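        # Import the pre-existing VPC by its attributes instead of creating a new one.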
        self.i_vpc = ec2.Vpc.from_vpc_attributes(
            scope=self,
            id="vpc",
            vpc_id=self.context.networking.vpc_id,
            availability_zones=self.context.networking.availability_zones,
        )
        self.role_eks_cluster = self._create_role_cluster()
        self.role_eks_env_nodegroup = self._create_env_nodegroup_role()
        self.role_fargate_profile = self._create_role_fargate_profile()
        self.role_cluster_autoscaler = self._create_cluster_autoscaler_role()
        if self.context.user_pool_id:
            self.context.cognito_users_url = orbit_cognito.get_users_url(
                user_pool_id=self.context.user_pool_id,
                region=self.context.region)
            cognito_pool_arn: str = orbit_cognito.get_pool_arn(
                user_pool_id=self.context.user_pool_id,
                region=self.context.region,
                account=self.context.account_id)
            self.user_pool: cognito.UserPool = self._get_user_pool(
                user_pool_arn=cognito_pool_arn)
        else:
            raise Exception("Missing Cognito User Pool ID ('user_pool_id')")
        self.user_pool_client = self._create_user_pool_client()
        self.identity_pool = self._create_identity_pool()
        self.token_validation_lambda = self._create_token_validation_lambda()
        self.eks_service_lambda = self._create_eks_service_lambda()
        self.cluster_pod_security_group = self._create_cluster_pod_security_group()
        self.context_parameter = self._create_manifest_parameter()
        self._create_post_authentication_lambda()
Example #17
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 policy: Policy, cluster: ICluster, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        policy.add_stack(self)

        if deployment == Deployment.PRODUCTION:
            desired_count = 2
            priority = 62
        else:
            desired_count = 1
            priority = 162

        api_fqdn = dns.subdomain_to_fqdn("api.master")
        api_url = f"https://{api_fqdn}"

        sentry_dsn = parameter_store.add_secure_string(
            f"/MasterServerWeb/{deployment.value}/SentryDSN").parameter

        ECSHTTPSContainer(
            self,
            self.application_name,
            subdomain_name=self.subdomain_name,
            deployment=deployment,
            policy=policy,
            application_name=self.application_name,
            image_name="ghcr.io/openttd/master-server-web",
            port=80,
            memory_limit_mib=96,
            desired_count=desired_count,
            cluster=cluster,
            priority=priority,
            command=[
                "--api-url",
                api_url,
                "run",
                "-p",
                "80",
                "-h",
                "0.0.0.0",
            ],
            environment={
                "WEBCLIENT_SENTRY_ENVIRONMENT": deployment.value.lower(),
            },
            secrets={
                "WEBCLIENT_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
            },
        )
Example #18
    def __init__(self, scope: Construct, id: str, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Stack", "Common-Ecs")

        self._cluster = Cluster(
            self,
            "Cluster",
            vpc=vpc,
        )

        asg = AutoScalingGroup(
            self,
            "ClusterASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.small"),
            machine_image=EcsOptimizedImage.amazon_linux2(),
            min_capacity=4,
        )
        self._cluster.add_auto_scaling_group(asg)

        # Create a SecurityGroup that the NLB can use to allow traffic from
        # NLB to us. This avoids a cyclic dependency.
        self.security_group = SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=False,
        )

        # Only use "source_security_group" to check if flows come from ECS.
        # Do not use it to allow traffic in ECS; use "security_group" for
        # that.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.source_security_group = asg.node.children[0]

        # We could also create an additional security group and add that to
        # the ASG, but those keep adding up. This way it is a tiny bit
        # easier to get an overview of the allowed traffic from the AWS
        # console.
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.tcp_range(32768, 65535),
            description="NLB-self to target",
        )
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.udp_range(32768, 65535),
            description="NLB-self to target (UDP)",
        )
Example #19
    def __init__(self, scope: Construct, id: str, context: "Context",
                 parameters: Dict[str, Any]) -> None:
        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=context.account_id, region=context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{context.name}")

        _logger.debug(
            "Passing Plugin CDK Stack parameters to the Nested stack Cfn parameters"
        )
        NestedCfnStack(self, id="custom-cfn-stack", parameters=parameters)
Example #20
    def add_nlb(self, scope: Construct, service: IEc2Service, port: Port,
                subdomain_name: str, description: str) -> None:
        port_dict = port.to_rule_json()
        Tags.of(service).add("NLB-protocol", port_dict["ipProtocol"])
        Tags.of(service).add("NLB-port", str(port_dict["fromPort"]))

        self.create_alias(scope, subdomain_name)

        self.security_group.add_ingress_rule(
            peer=Peer.any_ipv6(),
            connection=port,
            description=f"{description} (IPv6)")
        self.security_group.add_ingress_rule(
            peer=Peer.any_ipv4(),
            connection=port,
            description=f"{description} (IPv4)")
Example #21
    def __init__(self, scope: Construct, id: str, context: "Context",
                 team_context: "TeamContext", parameters: Dict[str, Any]) -> None:

        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=context.account_id, region=context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{context.name}")

        # Collecting required parameters
        team_space_props: Dict[str, Any] = {
            "account_id": context.account_id,
            "region": context.region,
            "partition": core.Aws.PARTITION,
            "env_name": context.name,
            "teamspace_name": team_context.name,
            "lake_role_name": f"orbit-{context.name}-{team_context.name}-role",
            "vpc_id": context.networking.vpc_id,
            "subnet_ids": [
                s.subnet_id
                for s in context.networking.public_subnets
                + context.networking.isolated_subnets
                + context.networking.private_subnets
            ],
            "team_security_group_id": team_context.team_security_group_id,
            "team_kms_key_arn": team_context.team_kms_key_arn,
        }

        self._redshift_clusters = RedshiftClusters(
            self,
            id="redshift-clusters-for-teamspace",
            team_space_props=team_space_props,
            plugin_params=parameters,
        )
Example #22
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: Deployment,
        additional_fqdns: Optional[List[str]] = None,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            f"BananasCdnRedirect{deployment.value}",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/bananas-cdn"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
            price_class=PriceClass.PRICE_CLASS_ALL,
            additional_fqdns=additional_fqdns,
            viewer_protocol_policy=ViewerProtocolPolicy.ALLOW_ALL,  # OpenTTD client doesn't support HTTPS
        )
        self.bucket = s3_cloud_front.bucket_site

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
            with_s3_get_object_access=True,
        )
Example #23
    def __init__(self, scope: Construct, stack_id: str) -> None:
        super().__init__(scope, stack_id)

        ############################################################################################
        # ### DATASET STAGING S3 BUCKET ############################################################
        ############################################################################################
        self.staging_bucket = aws_s3.Bucket(
            self,
            "dataset-staging-bucket",
            bucket_name=ResourceName.STAGING_BUCKET_NAME.value,
            access_control=aws_s3.BucketAccessControl.PRIVATE,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            removal_policy=RemovalPolicy.DESTROY,
        )

        Tags.of(self).add("ApplicationLayer", "staging")  # type: ignore[arg-type]
Example #24
    def __init__(self, scope: Construct, stack_id: str, *, env_name: str,
                 storage_bucket: aws_s3.Bucket) -> None:
        super().__init__(scope, stack_id)

        account_principal = aws_iam.AccountPrincipal(account_id=276514628126)
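        # Select the environment-specific external ID, falling back to the non-production value.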
        external_id = {
            PRODUCTION_ENVIRONMENT_NAME: "koordinates-jAddR"
        }.get(env_name, "koordinates-4BnJQ")
        role = aws_iam.Role(
            self,
            "koordinates-read-role",
            role_name=f"koordinates-s3-access-read-{env_name}",
            assumed_by=account_principal,  # type: ignore[arg-type]
            external_id=external_id,
            max_session_duration=MAX_SESSION_DURATION,
        )
        storage_bucket.grant_read(role)  # type: ignore[arg-type]

        Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]
Example #25
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        Tags.of(self).add('landing_zone', self.zone_name)

        self.networking = NetworkingLayer(
            self,
            self.zone_name,
            cidr=self.cidr_block,
            subnet_configuration=self.subnet_configuration)

        self.backup_policy = BackupStrategyConstruct(self,
                                                     'Backup',
                                                     landing_zone=self)

        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            description='Default-SG for {} landing zone'.format(
                self.zone_name),
            vpc=self.vpc,
            allow_all_outbound=True)

        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.all_icmp(),
            description='Grant icmp from anywhere')

        for address in ('72.90.160.65/32', '10.0.0.0/8', '192.168.0.0/16'):
            self.security_group.add_ingress_rule(
                peer=ec2.Peer.ipv4(address),
                connection=ec2.Port.all_traffic(),
                description='Grant any from ' + address)

            self.security_group.add_ingress_rule(
                peer=ec2.Peer.ipv4(address),
                connection=ec2.Port.tcp(3389),
                description='Grant rdp from ' + address)

            self.security_group.add_ingress_rule(
                peer=ec2.Peer.ipv4(address),
                connection=ec2.Port.tcp(22),
                description='Grant ssh from ' + address)
Example #26
    def __init__(self,
                 scope: Construct,
                 id: str,
                 *,
                 deployment: Deployment,
                 additional_fqdns: Optional[List[str]] = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            "CdnIndexRedirect",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/index-redirect"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
            additional_fqdns=additional_fqdns,
            price_class=PriceClass.PRICE_CLASS_ALL,
        )

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
            with_s3_get_object_access=True,
        )
Example #27
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(
            self,
            "Adam_MyVpc",
            cidr="10.13.0.0/21",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(name="isolated",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.ISOLATED)
            ])

        Tags.of(vpc).add("Owner", "Adam")
Example #28
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        global g_lambda_edge

        Tags.of(self).add("Stack", "Common-Lambda-Edge")

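        # Lambda@Edge requires an execution role that both lambda.amazonaws.com
        # and edgelambda.amazonaws.com can assume.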
        self._role = Role(
            self,
            "EdgeLambdaRole",
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("edgelambda.amazonaws.com"),
            ),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
            ],
        )

        if g_lambda_edge is not None:
            raise Exception("Only a single LambdaEdgeStack instance can exist")
        g_lambda_edge = self
Example #29
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        botocore_lambda_layer: aws_lambda_python.PythonLayerVersion,
        env_name: str,
        storage_bucket: aws_s3.Bucket,
        validation_results_table: Table,
    ) -> None:
        # pylint: disable=too-many-locals, too-many-statements

        super().__init__(scope, stack_id)

        ############################################################################################
        # PROCESSING ASSETS TABLE
        processing_assets_table = Table(
            self,
            f"{env_name}-processing-assets",
            env_name=env_name,
            parameter_name=ParameterName.PROCESSING_ASSETS_TABLE_NAME,
            sort_key=aws_dynamodb.Attribute(name="sk", type=aws_dynamodb.AttributeType.STRING),
        )

        ############################################################################################
        # BATCH JOB DEPENDENCIES
        batch_job_queue = BatchJobQueue(
            self,
            "batch-job-queue",
            env_name=env_name,
            processing_assets_table=processing_assets_table,
        ).job_queue

        s3_read_only_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"
        )

        ############################################################################################
        # UPDATE CATALOG UPDATE MESSAGE QUEUE

        dead_letter_queue = aws_sqs.Queue(
            self,
            "dead-letter-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
        )

        self.message_queue = aws_sqs.Queue(
            self,
            "update-catalog-message-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
            dead_letter_queue=aws_sqs.DeadLetterQueue(max_receive_count=3, queue=dead_letter_queue),
        )
        self.message_queue_name_parameter = aws_ssm.StringParameter(
            self,
            "update-catalog-message-queue-name",
            string_value=self.message_queue.queue_name,
            description=f"Update Catalog Message Queue Name for {env_name}",
            parameter_name=ParameterName.UPDATE_CATALOG_MESSAGE_QUEUE_NAME.value,
        )

        populate_catalog_lambda = BundledLambdaFunction(
            self,
            "populate-catalog-bundled-lambda-function",
            directory="populate_catalog",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
            botocore_lambda_layer=botocore_lambda_layer,
        )

        self.message_queue.grant_consume_messages(populate_catalog_lambda)
        populate_catalog_lambda.add_event_source(
            SqsEventSource(self.message_queue, batch_size=1)  # type: ignore[arg-type]
        )

        ############################################################################################
        # STATE MACHINE TASKS

        check_stac_metadata_task = LambdaTask(
            self,
            "check-stac-metadata-task",
            directory="check_stac_metadata",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        assert check_stac_metadata_task.lambda_function.role
        check_stac_metadata_task.lambda_function.role.add_managed_policy(
            policy=s3_read_only_access_policy
        )

        for table in [processing_assets_table, validation_results_table]:
            table.grant_read_write_data(check_stac_metadata_task.lambda_function)
            table.grant(
                check_stac_metadata_task.lambda_function,
                "dynamodb:DescribeTable",
            )

        content_iterator_task = LambdaTask(
            self,
            "content-iterator-task",
            directory="content_iterator",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{CONTENT_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        check_files_checksums_directory = "check_files_checksums"
        check_files_checksums_default_payload_object = {
            f"{DATASET_ID_KEY}.$": f"$.{DATASET_ID_KEY}",
            f"{VERSION_ID_KEY}.$": f"$.{VERSION_ID_KEY}",
            f"{METADATA_URL_KEY}.$": f"$.{METADATA_URL_KEY}",
            f"{FIRST_ITEM_KEY}.$": f"$.{CONTENT_KEY}.{FIRST_ITEM_KEY}",
            f"{ASSETS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{ASSETS_TABLE_NAME_KEY}",
            f"{RESULTS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{RESULTS_TABLE_NAME_KEY}",
        }
        check_files_checksums_single_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-single-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
        )
        array_size = int(
            aws_stepfunctions.JsonPath.number_at(f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}")
        )
        check_files_checksums_array_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-array-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
            array_size=array_size,
        )

        for reader in [
            content_iterator_task.lambda_function,
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            processing_assets_table.grant_read_data(reader)  # type: ignore[arg-type]
            processing_assets_table.grant(
                reader, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        for writer in [
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            validation_results_table.grant_read_write_data(writer)  # type: ignore[arg-type]
            validation_results_table.grant(
                writer, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        validation_summary_task = LambdaTask(
            self,
            "validation-summary-task",
            directory="validation_summary",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{VALIDATION_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(validation_summary_task.lambda_function)
        validation_results_table.grant(
            validation_summary_task.lambda_function, "dynamodb:DescribeTable"
        )

        import_dataset_role = aws_iam.Role(
            self,
            "import-dataset",
            assumed_by=aws_iam.ServicePrincipal(  # type: ignore[arg-type]
                "batchoperations.s3.amazonaws.com"
            ),
        )

        import_asset_file_function = ImportFileFunction(
            self,
            directory="import_asset_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )
        import_metadata_file_function = ImportFileFunction(
            self,
            directory="import_metadata_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )

        import_dataset_task = LambdaTask(
            self,
            "import-dataset-task",
            directory="import_dataset",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{IMPORT_DATASET_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(
                resources=[import_dataset_role.role_arn],
                actions=["iam:PassRole"],
            ),
        )
        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(resources=["*"], actions=["s3:CreateJob"])
        )

        for table in [processing_assets_table]:
            table.grant_read_data(import_dataset_task.lambda_function)
            table.grant(import_dataset_task.lambda_function, "dynamodb:DescribeTable")

        # Import status check
        wait_before_upload_status_check = Wait(
            self,
            "wait-before-upload-status-check",
            time=WaitTime.duration(Duration.seconds(10)),
        )
        upload_status_task = LambdaTask(
            self,
            "upload-status",
            directory="upload_status",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path="$.upload_status",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(upload_status_task.lambda_function)
        validation_results_table.grant(upload_status_task.lambda_function, "dynamodb:DescribeTable")

        upload_status_task.lambda_function.add_to_role_policy(ALLOW_DESCRIBE_ANY_S3_JOB)

        # Parameters
        import_asset_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import asset file function arn",
            string_value=import_asset_file_function.function_arn,
            description=f"Import asset file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_ASSET_FILE_FUNCTION_TASK_ARN.value,
        )
        import_metadata_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import metadata file function arn",
            string_value=import_metadata_file_function.function_arn,
            description=f"Import metadata file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_METADATA_FILE_FUNCTION_TASK_ARN.value,
        )

        import_dataset_role_arn_parameter = aws_ssm.StringParameter(
            self,
            "import dataset role arn",
            string_value=import_dataset_role.role_arn,
            description=f"Import dataset role ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_DATASET_ROLE_ARN.value,
        )

        update_dataset_catalog = LambdaTask(
            self,
            "update-dataset-catalog",
            directory="update_dataset_catalog",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        self.message_queue.grant_send_messages(update_dataset_catalog.lambda_function)

        for storage_writer in [
            import_dataset_role,
            import_dataset_task.lambda_function,
            import_asset_file_function,
            import_metadata_file_function,
            populate_catalog_lambda,
            update_dataset_catalog.lambda_function,
        ]:
            storage_bucket.grant_read_write(storage_writer)  # type: ignore[arg-type]

        grant_parameter_read_access(
            {
                import_asset_file_function_arn_parameter: [import_dataset_task.lambda_function],
                import_dataset_role_arn_parameter: [import_dataset_task.lambda_function],
                import_metadata_file_function_arn_parameter: [import_dataset_task.lambda_function],
                processing_assets_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    import_dataset_task.lambda_function,
                ],
                validation_results_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    validation_summary_task.lambda_function,
                    upload_status_task.lambda_function,
                ],
                self.message_queue_name_parameter: [update_dataset_catalog.lambda_function],
            }
        )

        success_task = aws_stepfunctions.Succeed(self, "success")
        upload_failure = aws_stepfunctions.Fail(self, "upload failure")
        validation_failure = aws_stepfunctions.Succeed(self, "validation failure")

        ############################################################################################
        # STATE MACHINE
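        # The Choice chain below: validate STAC metadata, iterate content batches,
        # run checksum checks as a single or array Batch job, summarise validation,
        # then import via S3 Batch Operations and poll until both uploads complete.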
        dataset_version_creation_definition = (
            check_stac_metadata_task.next(content_iterator_task)
            .next(
                aws_stepfunctions.Choice(  # type: ignore[arg-type]
                    self, "check_files_checksums_maybe_array"
                )
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}", 1
                    ),
                    check_files_checksums_single_task.batch_submit_job,
                )
                .otherwise(check_files_checksums_array_task.batch_submit_job)
                .afterwards()
            )
            .next(
                aws_stepfunctions.Choice(self, "content_iteration_finished")
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{NEXT_ITEM_KEY}", -1
                    ),
                    validation_summary_task.next(
                        aws_stepfunctions.Choice(  # type: ignore[arg-type]
                            self, "validation_successful"
                        )
                        .when(
                            aws_stepfunctions.Condition.boolean_equals(
                                f"$.{VALIDATION_KEY}.{SUCCESS_KEY}", True
                            ),
                            import_dataset_task.next(
                                wait_before_upload_status_check  # type: ignore[arg-type]
                            )
                            .next(upload_status_task)
                            .next(
                                aws_stepfunctions.Choice(
                                    self, "import_completed"  # type: ignore[arg-type]
                                )
                                .when(
                                    aws_stepfunctions.Condition.and_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Complete"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Complete",
                                        ),
                                    ),
                                    update_dataset_catalog.next(
                                        success_task  # type: ignore[arg-type]
                                    ),
                                )
                                .when(
                                    aws_stepfunctions.Condition.or_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Failed"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Failed",
                                        ),
                                    ),
                                    upload_failure,  # type: ignore[arg-type]
                                )
                                .otherwise(
                                    wait_before_upload_status_check  # type: ignore[arg-type]
                                )
                            ),
                        )
                        .otherwise(validation_failure)  # type: ignore[arg-type]
                    ),
                )
                .otherwise(content_iterator_task)
            )
        )

        self.state_machine = aws_stepfunctions.StateMachine(
            self,
            f"{env_name}-dataset-version-creation",
            definition=dataset_version_creation_definition,  # type: ignore[arg-type]
        )

        self.state_machine_parameter = aws_ssm.StringParameter(
            self,
            "state machine arn",
            description=f"State machine ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_DATASET_VERSION_CREATION_STEP_FUNCTION_ARN.value,  # pylint:disable=line-too-long
            string_value=self.state_machine.state_machine_arn,
        )

        Tags.of(self).add("ApplicationLayer", "processing")  # type: ignore[arg-type]
Example #30
    def __init__(self, scope: Construct, stack_id: str, *, env_name: str) -> None:
        super().__init__(scope, stack_id)

        ############################################################################################
        # ### DEPLOYMENT VERSION ###################################################################
        ############################################################################################

        aws_ssm.StringParameter(
            self,
            "git-branch",
            parameter_name=f"/{env_name}/git_branch",
            string_value=GIT_BRANCH,
            description="Deployment git branch",
        )

        aws_ssm.StringParameter(
            self,
            "git-commit",
            parameter_name=f"/{env_name}/git_commit",
            string_value=GIT_COMMIT,
            description="Deployment git commit",
        )

        aws_ssm.StringParameter(
            self,
            "git-tag",
            parameter_name=f"/{env_name}/version",
            string_value=GIT_TAG,
            description="Deployment version",
        )

        ############################################################################################
        # ### STORAGE S3 BUCKET ####################################################################
        ############################################################################################
        self.storage_bucket = aws_s3.Bucket(
            self,
            "storage-bucket",
            bucket_name=ResourceName.STORAGE_BUCKET_NAME.value,
            access_control=aws_s3.BucketAccessControl.PRIVATE,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            removal_policy=REMOVAL_POLICY,
        )

        ############################################################################################
        # ### APPLICATION DB #######################################################################
        ############################################################################################
        self.datasets_table = Table(
            self,
            f"{env_name}-datasets",
            env_name=env_name,
            parameter_name=ParameterName.STORAGE_DATASETS_TABLE_NAME,
        )

        self.datasets_table.add_global_secondary_index(
            index_name=DatasetsTitleIdx.Meta.index_name,
            partition_key=aws_dynamodb.Attribute(
                name="title", type=aws_dynamodb.AttributeType.STRING
            ),
        )

        self.validation_results_table = Table(
            self,
            f"{env_name}-validation-results",
            env_name=env_name,
            parameter_name=ParameterName.STORAGE_VALIDATION_RESULTS_TABLE_NAME,
            sort_key=aws_dynamodb.Attribute(name="sk", type=aws_dynamodb.AttributeType.STRING),
        )

        self.validation_results_table.add_global_secondary_index(
            index_name=ValidationOutcomeIdx.Meta.index_name,
            partition_key=aws_dynamodb.Attribute(
                name=ValidationOutcomeIdx.pk.attr_name, type=aws_dynamodb.AttributeType.STRING
            ),
            sort_key=aws_dynamodb.Attribute(
                name=ValidationOutcomeIdx.result.attr_name, type=aws_dynamodb.AttributeType.STRING
            ),
        )

        Tags.of(self).add("ApplicationLayer", "storage")  # type: ignore[arg-type]