Example #1
    def __create_eks_control_plane(self, vpc: ec2.Vpc) -> eks.Cluster:
        # This role is used to connect to the cluster with admin access
        # It is associated with the system:masters Kubernetes RBAC group
        masters_role = iam.Role(
            self,
            'eksClusterAdmin',
            role_name='eks-cluster-admin-'+self._config['stage'],
            assumed_by=iam.AccountRootPrincipal()
        )

        # Control plane role
        # It provides permissions for the Kubernetes control plane
        # to make calls to AWS API operations on your behalf.
        role = self.__create_eks_control_plane_role()

        eks_config = self._config['compute']['eks']
        self._cluster = eks.Cluster(
            scope=self,
            id="ControlPlane",
            cluster_name=self._config['name'],
            role=role,
            masters_role=masters_role,
            version=eks.KubernetesVersion.of(eks_config['version']),
            vpc=vpc,
            vpc_subnets=list(
                map(lambda group_name: ec2.SubnetSelection(subnet_group_name=group_name),
                    eks_config['subnetGroupNames'])
            ),
            default_capacity=0,
        )
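The helper referenced above is not shown in this example. A minimal sketch of what it might contain, assuming the standard EKS service-role setup (the construct ID and managed policy are assumptions, not from the source):

    # Hypothetical sketch of the helper used above: the role the EKS
    # control plane assumes to call other AWS APIs on your behalf.
    def __create_eks_control_plane_role(self) -> iam.Role:
        return iam.Role(
            self,
            'ControlPlaneRole',
            assumed_by=iam.ServicePrincipal('eks.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonEKSClusterPolicy')
            ],
        )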
Example #2
    def __init__(self, scope: core.Construct, construct_id: str, 
        my_service_details=None, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Avoid a shared mutable default argument by defaulting to None above
        self.my_service_details = my_service_details or {}

        masters_role = iam.Role(
            self, "clusterAdmin",
            role_name="demo_EKS_cluster_role",
            assumed_by=iam.AccountRootPrincipal()
        )

        k8s_cluster = eks.Cluster(
            self, "defaultCluster", 
            cluster_name="DemoEKS",
            version=eks.KubernetesVersion.V1_19,
            default_capacity=1,
            default_capacity_type=eks.DefaultCapacityType.EC2,
            default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
            masters_role=masters_role
            )
        k8s_cluster.add_fargate_profile(
            "FargateEnabled", selectors=[
                eks.Selector(
                    namespace="eksdemo", 
                    labels={"fargate":"enabled"})
            ]
        )

        my_service = EksServices(self, "myService", eks_cluster=k8s_cluster, service=self.my_service_details)
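Only pods matching the profile's selector run on Fargate. A minimal sketch of a matching workload, assuming a CDK version where Cluster.add_manifest is available (the manifest content is illustrative):

    # Hypothetical manifest: pods in the "eksdemo" namespace carrying the
    # "fargate: enabled" label match the profile above and run on Fargate.
    k8s_cluster.add_manifest("demoApp", {
        "apiVersion": "apps/v1",
        "kind": "Deployment",
        "metadata": {"name": "demo-app", "namespace": "eksdemo"},
        "spec": {
            "replicas": 1,
            "selector": {"matchLabels": {"app": "demo-app"}},
            "template": {
                "metadata": {"labels": {"app": "demo-app", "fargate": "enabled"}},
                "spec": {"containers": [{"name": "app", "image": "nginx"}]},
            },
        },
    })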
Example #3
    def _setup_opensearch_1_0(self) -> None:
        domain_name = "wrangler-os-1-0"
        validate_domain_name(domain_name)
        domain_arn = f"arn:aws:es:{self.region}:{self.account}:domain/{domain_name}"
        domain = opensearch.Domain(
            self,
            domain_name,
            domain_name=domain_name,
            version=opensearch.EngineVersion.OPENSEARCH_1_0,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            access_policies=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["es:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=[f"{domain_arn}/*"],
                )
            ],
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self,
                  f"DomainEndpoint-{domain_name}",
                  value=domain.domain_endpoint)
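validate_domain_name is a project helper, not a CDK API. A rough stand-in, assuming it only enforces the documented OpenSearch naming rules:

    # Hypothetical stand-in for the helper used above. OpenSearch domain
    # names must be 3-28 characters long, start with a lowercase letter,
    # and contain only lowercase letters, digits, and hyphens.
    import re

    def validate_domain_name(domain_name: str) -> None:
        if not re.fullmatch(r"[a-z][a-z0-9-]{2,27}", domain_name):
            raise ValueError(f"Invalid OpenSearch domain name: {domain_name}")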
Example #4
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        cluster_admin = iam.Role(self,
                                 'ClusterAdmin',
                                 assumed_by=iam.AccountRootPrincipal(),
                                 role_name='eks_cdk_admin')

        # The code that defines your stack goes here
        example_cluster = eks.Cluster(self,
                                      'Example',
                                      version=eks.KubernetesVersion.V1_19,
                                      masters_role=cluster_admin)

        example_cluster.aws_auth.add_user_mapping(user=iam.User.from_user_name(
            self, 'K8SUser', 'k8s'),
                                                  groups=['system:masters'])

        example_cluster.add_fargate_profile(
            'ExampleFargate',
            selectors=[{
                'namespace': 'kube-system'
            }],
            fargate_profile_name='ExampleFargate')
Example #5
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cluster_name: str,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # EKS admin role
        self._clusterAdminRole = iam.Role(
            self, 'clusterAdmin', assumed_by=iam.AccountRootPrincipal())
        self._clusterAdminRole.add_to_policy(
            iam.PolicyStatement(
                resources=["*"],
                actions=[
                    "eks:Describe*", "eks:List*", "eks:AccessKubernetesApi",
                    "ssm:GetParameter", "iam:ListRoles"
                ],
            ))
        core.Tags.of(self._clusterAdminRole).add(key='eks/%s/type' %
                                                 cluster_name,
                                                 value='admin-role')

        # Managed Node Group Instance Role
        _managed_node_managed_policies = (
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonEKSWorkerNodePolicy'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonEKS_CNI_Policy'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonEC2ContainerRegistryReadOnly'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'CloudWatchAgentServerPolicy'),
        )
        self._managed_node_role = iam.Role(
            self,
            'NodeInstance-Role',
            path='/',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=list(_managed_node_managed_policies),
        )

        # Override Cfn Nag rule
        scan.suppress_cfnnag_rule(
            'W12', 'by default the role has * resource',
            self._clusterAdminRole.node.find_child(
                'DefaultPolicy').node.default_child)
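Nothing in this excerpt consumes the node instance role. A sketch of how it might be attached to a managed node group, assuming an eks.Cluster named "cluster" exists elsewhere in the stack:

    # Hypothetical usage: hand the instance role to a managed node group
    # so its EC2 nodes get the four managed policies listed above.
    cluster.add_nodegroup_capacity(
        'ManagedNodeGroup',
        node_role=self._managed_node_role,
        min_size=1,
        max_size=3,
    )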
Example #6
    def __init__(self,
                 scope,
                 id,
                 *,
                 description=None,
                 env=None,
                 tags=None,
                 synthesizer=None):
        super().__init__(scope,
                         id,
                         description=description,
                         env=env,
                         tags=tags,
                         synthesizer=synthesizer)

        vpc = ec2.Vpc(
            self,
            f"kodexa-vpc-{id}",
            max_azs=2,
            cidr="10.10.0.0/16",
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24)
            ],
            nat_gateways=1,
        )

        core.CfnOutput(self, "Output", value=vpc.vpc_id)

        # Create K8S cluster

        cluster_admin = iam.Role(self,
                                 f"kodexa-eks-adminrole-{id}",
                                 assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self,
                              id=f'kodexa-eks-cluster-{id}',
                              cluster_name=f'kodexa-eks-cluster-{id}',
                              version=eks.KubernetesVersion.V1_17,
                              vpc=vpc,
                              default_capacity=4,
                              masters_role=cluster_admin)
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        cluster_admin = iam.Role(self,
                                 "AdminRole",
                                 assumed_by=iam.AccountRootPrincipal())

        vpc = ec2.Vpc(self, "EKSVpc", cidr="10.2.0.0/16")

        eksCluster = eks.Cluster(
            self,
            "fedcluster",
            vpc=vpc,
            cluster_name="awsfedcluster",
            kubectl_enabled=True,
            masters_role=cluster_admin,
            default_capacity=2,
            default_capacity_instance=ec2.InstanceType("t3.large"))
Example #8
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        clusters: typing.List[aws_eks.ICluster],
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        principal = aws_iam.AccountRootPrincipal()

        self.admin = eks_user.eks_user(
            scope=self,
            id='eks-admin',
            role_name=id + '-admin',
            k8s_username='******',
            k8s_groups=['system:masters'],
            clusters=clusters,
            principal=principal,
        )

        self.dev_team_x = eks_user.eks_user(
            scope=self,
            id='eks-dev-team-x',
            role_name=id + '-dev-team-x',
            k8s_username='******',
            k8s_groups=['dev-team-x'],
            clusters=clusters,
            principal=principal,
        )

        self.dev_team_y = eks_user.eks_user(
            scope=self,
            id='eks-dev-team-y',
            role_name=id + '-dev-team-y',
            k8s_username='******',
            k8s_groups=['dev-team-y'],
            clusters=clusters,
            principal=principal,
        )

        self.cluster_users = [self.admin, self.dev_team_x, self.dev_team_y]
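eks_user.eks_user is a project-specific helper rather than a CDK construct. A rough sketch of what it presumably does, assuming the clusters are eks.Cluster instances exposing aws_auth (everything here is an assumption, not from the source):

    # Hypothetical sketch of the eks_user helper: create a role the given
    # principal can assume, then map it into each cluster's aws-auth
    # ConfigMap under the requested Kubernetes username and groups.
    def eks_user(scope, id, role_name, k8s_username, k8s_groups, clusters, principal):
        role = aws_iam.Role(scope, id, role_name=role_name, assumed_by=principal)
        for cluster in clusters:
            cluster.aws_auth.add_role_mapping(
                role, username=k8s_username, groups=k8s_groups)
        return role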
Example #9
    def __init__(self, scope, id, *, description=None, env=None, tags=None, synthesizer=None, iam_user=None,
                 vpc_id=None, default_capacity=4, default_instance_type='t3a.large'):
        super().__init__(scope, id, description=description, env=env, tags=tags,
                         synthesizer=synthesizer)

        if vpc_id:
            vpc = ec2.Vpc.from_lookup(self, "VPC",
                                      vpc_id=vpc_id)
        else:
            vpc = ec2.Vpc(self, f"kodexa-vpc-{id}",
                          max_azs=2,
                          cidr="10.10.0.0/16",
                          subnet_configuration=[ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PUBLIC,
                              name="Public",
                              cidr_mask=24
                          ), ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PRIVATE,
                              name="Private",
                              cidr_mask=24
                          )],
                          nat_gateways=1,
                          )

            core.CfnOutput(self, "Output",
                           value=vpc.vpc_id)

        # Create K8S cluster

        cluster_admin = iam.Role(self, f"kodexa-eks-adminrole-{id}", assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self, id=f'kodexa-eks-cluster-{id}', cluster_name=f'kodexa-eks-cluster-{id}',
                              version=eks.KubernetesVersion.V1_17,
                              vpc=vpc,
                              default_capacity_instance=ec2.InstanceType(default_instance_type),
                              default_capacity=default_capacity,
                              masters_role=cluster_admin)

        if iam_user:
            admin_user = iam.User.from_user_name(id='cluster-admin-iam-user', user_name=iam_user, scope=self)
            cluster.aws_auth.add_user_mapping(admin_user, groups=['system:masters'])
Example #10
    def add_deny_for_everyone_except(
        self,
        master_secret: secretsmanager.Secret,
        producer_functions: List[lambda_.Function],
    ) -> None:
        """
        Sets up the master secret resource policy so that everything *except* the given functions
        is denied access to GetSecretValue.

        Args:
            master_secret: the master secret construct
            producer_functions: a list of functions we are going to set as the only allowed accessors
        """
        # this locks down the master secret so that *only* the JWT producers can read values
        # (it is only once we set the DENY policy here that other roles in the same account
        #  lose access to the secret value - and only after doing that do we need to explicitly
        #  allow the roles we do want to access it)
        role_arns: List[str] = []

        for f in producer_functions:
            if not f.role:
                raise Exception(
                    f"Rotation function {f.function_name} has somehow not created a Lambda role correctly"
                )

            role_arns.append(f.role.role_arn)

        master_secret.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["secretsmanager:GetSecretValue"],
                resources=["*"],
                principals=[iam.AccountRootPrincipal()],
                # https://stackoverflow.com/questions/63915906/aws-secrets-manager-resource-policy-to-deny-all-roles-except-one-role
                conditions={
                    "ForAllValues:StringNotEquals": {
                        "aws:PrincipalArn": role_arns
                    }
                },
            ))
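A sketch of how this method might be called from the owning stack, assuming a secret and a single producer function defined alongside it (construct IDs and the inline handler are illustrative):

    # Hypothetical usage: after this call, every principal in the account
    # except jwt_producer's role is denied secretsmanager:GetSecretValue.
    master_secret = secretsmanager.Secret(self, "MasterSecret")
    jwt_producer = lambda_.Function(
        self,
        "JwtProducer",
        runtime=lambda_.Runtime.PYTHON_3_8,
        handler="index.handler",
        code=lambda_.Code.from_inline("def handler(event, context): pass"),
    )
    master_secret.grant_read(jwt_producer)  # the explicit ALLOW side
    self.add_deny_for_everyone_except(master_secret, [jwt_producer])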
Example #11
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level: str, store_events_topic, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Sales Queue Consumer
        sales_q = _sqs.Queue(
            self,
            "salesEventsQueue",
            delivery_delay=cdk.Duration.seconds(5),
            queue_name=f"sales_q",
            retention_period=cdk.Duration.days(2),
            visibility_timeout=cdk.Duration.seconds(10),
            receive_message_wait_time=cdk.Duration.seconds(10))

        # Create a Filter for Sales Subscription
        sales_policy = {
            "evnt_type": _sns.SubscriptionFilter(conditions=["sales-event"])
        }

        # Create an SQS type subscription to SNS
        sales_subs = _sns_subs.SqsSubscription(sales_q,
                                               filter_policy=sales_policy)

        # Add the subscription to the topic
        store_events_topic.add_subscription(sales_subs)

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_sns_consumer_stack/lambda_src/sqs_data_consumer.py",
                    encoding="utf-8",
                    mode="r") as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name=f"sales_queue_consumer_fn",
            description="Process messages in SQS queue",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=cdk.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "SALES_QUEUE_NAME": f"{sales_q.queue_name}",
                "TRIGGER_RANDOM_DELAY": "True"
            })

        msg_consumer_fn_version = msg_consumer_fn.latest_version
        msg_consumer_fn_version_alias = _lambda.Alias(
            self,
            "msgConsumerFnAlias",
            alias_name="MystiqueAutomation",
            version=msg_consumer_fn_version)

        # Create Custom Loggroup for Producer
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the consumer Lambda to be invoked only from the stack owner account
        msg_consumer_fn.add_permission("restrictLambdaInvocationToOwnAccount",
                                       principal=_iam.AccountRootPrincipal(),
                                       action="lambda:InvokeFunction",
                                       source_account=cdk.Aws.ACCOUNT_ID,
                                       source_arn=sales_q.queue_arn)

        # Set our Lambda Function to be invoked by SQS
        msg_consumer_fn.add_event_source(_sqsEventSource(sales_q,
                                                         batch_size=5))

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_2 = cdk.CfnOutput(
            self,
            "SalesEventsConsumer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={cdk.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process events received from SQS event bus")
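The filter policy above only lets through messages whose evnt_type attribute equals "sales-event". A minimal publisher sketch showing the attribute the subscription matches on (the topic ARN placeholder is illustrative):

    # Hypothetical publisher: only messages carrying a matching
    # "evnt_type" attribute reach sales_q through this subscription.
    import boto3

    sns = boto3.client("sns")
    sns.publish(
        TopicArn="<store_events_topic_arn>",
        Message='{"item": "book", "qty": 2}',
        MessageAttributes={
            "evnt_type": {"DataType": "String", "StringValue": "sales-event"}
        },
    )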
Example #12
    def __init__(self, scope: core.Construct, id: str, VPC: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster_admin = iam.Role(self,
                                 "AdminRole",
                                 assumed_by=iam.AccountRootPrincipal())

        self.cluster = eks.Cluster(self,
                                   "cluster",
                                   default_capacity=self.node.try_get_context(
                                       "kubernetes")["default_capacity"],
                                   default_capacity_instance=ec2.InstanceType(
                                       self.node.try_get_context("kubernetes")
                                       ["default_capacity_instance"]),
                                   cluster_name="statement-demo",
                                   vpc=VPC,
                                   vpc_subnets=VPC.private_subnets,
                                   masters_role=cluster_admin,
                                   version=eks.KubernetesVersion.V1_17,
                                   endpoint_access=eks.EndpointAccess.PRIVATE)

        vpc_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "sgVPC", VPC.vpc_default_security_group)
        eks_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "sgEKS", self.cluster.cluster_security_group_id)

        vpc_security_group.add_ingress_rule(eks_security_group,
                                            ec2.Port.all_traffic())

        eks_security_group.add_ingress_rule(vpc_security_group,
                                            ec2.Port.all_traffic())

        self.cluster.default_nodegroup.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchAgentServerPolicy"))

        #see https://github.com/kubernetes/kubernetes/issues/61486?#issuecomment-635169272
        eks.KubernetesPatch(
            self,
            "patch",
            cluster=self.cluster,
            resource_name="daemonset/kube-proxy",
            resource_namespace="kube-system",
            apply_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy",
                                    "--v=2",
                                    "--hostname-override=$(NODE_NAME)",
                                    "--config=/var/lib/kube-proxy-config/config",
                                ],
                                "env": [{
                                    "name": "NODE_NAME",
                                    "valueFrom": {
                                        "fieldRef": {
                                            "apiVersion": "v1",
                                            "fieldPath": "spec.nodeName"
                                        }
                                    }
                                }]
                            }]
                        }
                    }
                }
            },
            restore_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy", "--v=2",
                                    "--config=/var/lib/kube-proxy-config/config"
                                ]
                            }]
                        }
                    }
                }
            })

        # Elasticsearch clusters have many nodes, and their DNS records are always truncated by OpenDNS
        eks.KubernetesPatch(
            self,
            "coreDNSTCP",
            cluster=self.cluster,
            resource_name="configmap/coredns",
            resource_namespace="kube-system",
            apply_patch={
                "data": {
                    "Corefile":
                    ".:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n      pods insecure\n      upstream\n      fallthrough in-addr.arpa ip6.arpa\n    }\n    prometheus :9153\n    forward . /etc/resolv.conf {\n      force_tcp\n    }\n    cache 30\n    loop\n    reload\n    loadbalance\n}\n"
                }
            },
            restore_patch={
                "data": {
                    "Corefile":
                    ".:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n      pods insecure\n      upstream\n      fallthrough in-addr.arpa ip6.arpa\n    }\n    prometheus :9153\n    forward . /etc/resolv.conf\n    cache 30\n    loop\n    reload\n    loadbalance\n}\n"
                }
            })

        # adding myself as a cluster admin
        self.cluster.aws_auth.add_user_mapping(iam.User.from_user_name(
            self, "me",
            boto3.client('sts').get_caller_identity().get('Arn').partition('/')
            [2]),
                                               groups=["system:masters"])

        text = requests.get(
            "https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/quickstart/cwagent-fluentd-quickstart.yaml"
        ).text.replace("{{cluster_name}}", self.cluster.cluster_name).replace(
            "{{region_name}}",
            core.Stack.of(self).region)
        eks.KubernetesManifest(
            self,
            "containerInsights",
            cluster=self.cluster,
            manifest=[yaml.safe_load(item) for item in text.split("---\n")])
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bkt01 = s3.Bucket(
            self,
            "abacBucket",
            versioned=True,
            # encryption=s3.BucketEncryption.KMS_MANAGED,
            block_public_access=s3.BlockPublicAccess(block_public_policy=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        bkt01.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # actions=["s3:GetObject"],
                actions=["s3:*"],
                # resources=[bkt01.arn_for_objects("file.txt")],
                resources=[bkt01.arn_for_objects("*")],
                principals=[iam.AccountRootPrincipal()]))
        # Create 3 Users: 1 Admin & 2 Normal Users
        redUser1 = iam.User(
            self,
            "redUser1",
            user_name="redUser",
            password=core.SecretValue.plain_text("redUser1SUPERDUMBpassWord"))

        blueUser1 = iam.User(
            self,
            "blueUser1",
            user_name="blueUser",
            password=core.SecretValue.plain_text("blueUser1SUPERDUMBpassWord"))

        adminUser1 = iam.User(self,
                              "adminUser1",
                              user_name="adminUser",
                              password=core.SecretValue.plain_text(
                                  "adminUser1SUPERDUMBpassWord"))

        unicornGrp = iam.Group(self, "unicornGrp", group_name="unicornGroup")

        # Add Users To Group
        unicornGrp.add_user(redUser1)
        unicornGrp.add_user(blueUser1)
        unicornGrp.add_user(adminUser1)

        # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))
        ##############################################
        # We need a custom resource to TAG IAM Users #
        ##############################################

        resource = MyCustomResource(self,
                                    "iamTagger",
                                    message=[{
                                        "user":
                                        redUser1.user_name,
                                        "tags": [{
                                            'Key': 'teamName',
                                            'Value': 'teamUnicorn'
                                        }, {
                                            'Key': 'projectName',
                                            'Value': 'projectRed'
                                        }]
                                    }, {
                                        "user":
                                        blueUser1.user_name,
                                        "tags": [{
                                            'Key': 'teamName',
                                            'Value': 'teamUnicorn'
                                        }, {
                                            'Key': 'projectName',
                                            'Value': 'projectBlue'
                                        }]
                                    }, {
                                        "user":
                                        adminUser1.user_name,
                                        "tags": [{
                                            'Key': 'teamName',
                                            'Value': 'teamUnicorn'
                                        }, {
                                            'Key': 'projectAdmin',
                                            'Value': 'yes'
                                        }]
                                    }])

        # Publish the custom resource output
        core.CfnOutput(
            self,
            "ResponseMessage",
            description="The message that came back from the Custom Resource",
            value=resource.response,
        )

        # Let's create the IAM roles to be used by the group
        accountId = core.Aws.ACCOUNT_ID
        unicornTeamProjectRedRole = iam.Role(
            self,
            'unicornTeamProjectRedRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="unicornTeamProjectRedRole")
        core.Tag.add(unicornTeamProjectRedRole,
                     key="teamName",
                     value="teamUnicorn")
        core.Tag.add(unicornTeamProjectRedRole,
                     key="projectName",
                     value="projectRed")

        unicornTeamProjectBlueRole = iam.Role(
            self,
            'unicornTeamProjectBlueRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="unicornTeamProjectBlueRole")
        core.Tag.add(unicornTeamProjectBlueRole,
                     key="teamName",
                     value="teamUnicorn")
        core.Tag.add(unicornTeamProjectBlueRole,
                     key="projectName",
                     value="projectBlue")

        unicornTeamProjectAdminRole = iam.Role(
            self,
            'unicornTeamProjectAdminRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="unicornTeamProjectAdminRole")
        core.Tag.add(unicornTeamProjectAdminRole,
                     key="teamName",
                     value="teamUnicorn")
        core.Tag.add(unicornTeamProjectAdminRole,
                     key="projectAdmin",
                     value="yes")

        # Allow Group to Assume Role
        grpStmt1 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[f"arn:aws:iam::{accountId}:role/unicornTeamProject*"],
            actions=["sts:AssumeRole"],
            conditions={
                "StringEquals": {
                    "iam:ResourceTag/teamName":
                    "${aws:PrincipalTag/teamName}",
                    "iam:ResourceTag/projectName":
                    "${aws:PrincipalTag/projectName}"
                }
            })
        grpStmt1.sid = "AllowGroupMembersToAssumeRoleMatchingTeamName"
        unicornGrp.add_to_policy(grpStmt1)

        # Add Permissions to the Role
        roleStmt1 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=["*"],
            actions=["s3:ListAllMyBuckets", "s3:HeadBucket"])
        roleStmt1.sid = "AllowGroupToSeeBucketListInTheConsole"

        roleStmt2 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bkt01.bucket_arn],
            actions=["s3:ListBucket", "s3:ListBucketVersions"],
            # Below condition can be used to enable listing a particular prefix in another statement
            # conditions={ "StringEquals" : { "s3:prefix":[""], "s3:delimiter":["/"] } }
        )
        roleStmt2.sid = "AllowRootLevelListingOfBucket"

        roleStmt3 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bkt01.arn_for_objects("*")],
            actions=["s3:Get*"],
            conditions={
                "StringEquals": {
                    "s3:ExistingObjectTag/teamName":
                    "${aws:PrincipalTag/teamName}",
                    "s3:ExistingObjectTag/projectName":
                    "${aws:PrincipalTag/projectName}"
                }
            })
        roleStmt3.sid = "ReadOnlyAccessToTeams"

        roleStmt4 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bkt01.arn_for_objects("*")],
            actions=[
                "s3:PutObject", "s3:PutObjectTagging",
                "s3:PutObjectVersionTagging"
            ],
            conditions={
                "StringEquals": {
                    "s3:RequestObjectTag/teamName":
                    "${aws:PrincipalTag/teamName}",
                    "s3:RequestObjectTag/projectName":
                    "${aws:PrincipalTag/projectName}"
                }
            })
        roleStmt4.sid = "WriteTaggedObjectOwnedByThem"

        roleStmt5 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bkt01.bucket_arn,
                       bkt01.arn_for_objects("*")],
            actions=["s3:*"],
            conditions={
                "StringEquals": {
                    "${aws:PrincipalTag/projectAdmin}": ["yes"]
                }
            })
        roleStmt5.sid = "FullAccessToAdminsFromSameTeam"

        unicornTeamProjectRedRole.add_to_policy(roleStmt1)
        unicornTeamProjectRedRole.add_to_policy(roleStmt2)
        unicornTeamProjectRedRole.add_to_policy(roleStmt3)
        unicornTeamProjectRedRole.add_to_policy(roleStmt4)
        unicornTeamProjectRedRole.add_to_policy(roleStmt5)

        # Add same permissions to projectBlueRole
        unicornTeamProjectBlueRole.add_to_policy(roleStmt1)
        unicornTeamProjectBlueRole.add_to_policy(roleStmt2)
        unicornTeamProjectBlueRole.add_to_policy(roleStmt3)
        unicornTeamProjectBlueRole.add_to_policy(roleStmt4)
        unicornTeamProjectBlueRole.add_to_policy(roleStmt5)

        # Add same permissions to projectAdminRole
        unicornTeamProjectAdminRole.add_to_policy(roleStmt1)
        unicornTeamProjectAdminRole.add_to_policy(roleStmt2)
        unicornTeamProjectAdminRole.add_to_policy(roleStmt3)
        unicornTeamProjectAdminRole.add_to_policy(roleStmt4)
        unicornTeamProjectAdminRole.add_to_policy(roleStmt5)
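A sketch of the resulting ABAC behavior from the client side, assuming redUser's credentials (the account ID and bucket name placeholders are illustrative):

    # Hypothetical check: redUser assumes the red role (the teamName and
    # projectName tag conditions match), then writes an object whose
    # request tags match the role's principal tags.
    import boto3

    sts = boto3.client("sts")  # using redUser's credentials
    creds = sts.assume_role(
        RoleArn="arn:aws:iam::<account-id>:role/unicornTeamProjectRedRole",
        RoleSessionName="redUserSession",
    )["Credentials"]
    s3_client = boto3.client(
        "s3",
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
    s3_client.put_object(
        Bucket="<bucket-name>",
        Key="red/file.txt",
        Body=b"hello",
        Tagging="teamName=teamUnicorn&projectName=projectRed",
    )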
Example #14
def create_action(
    scope: core.Construct,
    id: str,
    action_def: Union[CodeCommitAction, CodeBuildAction,
                      CloudFormationCreateUpdateStackAction, ApprovalAction,
                      LambdaInvokeAction, S3SourceAction, ],
):
    action_name = action_def.pop("name")
    run_order = action_def.get("run_order", 1)
    variables_namespace = action_def.get("variables_namespace")
    role = (aws_iam.Role.from_role_arn(scope, f"{id}RoleRef",
                                       action_def["role_arn"])
            if "role_arn" in action_def else None)

    if action_def["type"] == "CODECOMMIT":
        action_def = cast(CodeCommitAction, action_def)
        repository = aws_codecommit.Repository.from_repository_name(
            scope, f"{id}Repo", action_def["repository"])
        output = aws_codepipeline.Artifact(action_def["output"])
        return aws_codepipeline_actions.CodeCommitSourceAction(
            action_name=action_name,
            output=output,
            repository=repository,
            branch=action_def.get("branch", "master"),
            run_order=run_order,
            role=role,
            variables_namespace=variables_namespace,
        )
    elif action_def["type"] == "S3_SOURCE":
        action_def = cast(S3SourceAction, action_def)
        output = aws_codepipeline.Artifact(action_def["output"])
        if "kms_key_arn" in action_def:
            role = aws_iam.Role(
                scope,
                f"{id}Role",
                assumed_by=aws_iam.AccountRootPrincipal(),
            )
            aws_kms.Key.from_key_arn(
                scope, f"{id}KeyRef",
                key_arn=action_def["kms_key_arn"]).grant_decrypt(role)
        if "bucket" in action_def:
            bucket = aws_s3.Bucket.from_bucket_name(scope,
                                                    f"{id}SourceBucketRef",
                                                    action_def["bucket"])
        else:
            bucket = aws_s3.Bucket(
                scope,
                f"{id}SourceBucket",
                block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
                removal_policy=core.RemovalPolicy.DESTROY,
            )
            core.CfnOutput(scope,
                           f"{id}SourceBucketName",
                           value=bucket.bucket_name)
        return aws_codepipeline_actions.S3SourceAction(
            action_name=action_name,
            output=output,
            run_order=run_order,
            role=role,
            bucket=bucket,
            bucket_key=action_def["key"],
        )
    elif action_def["type"] == "CODEBUILD":
        action_def = cast(CodeBuildAction, action_def)
        # Set up CodeBuild project
        project_params = {
            "build_spec":
            aws_codebuild.BuildSpec.from_source_filename(
                action_def.get("build_spec", "buildspec.yaml")),
            "timeout":
            core.Duration.minutes(int(action_def.get("timeout_minutes", 60))),
        }
        project_params["environment"] = {
            "build_image": aws_codebuild.LinuxBuildImage.AMAZON_LINUX_2_3
        }
        if "environment" in action_def:
            if "build_image" in action_def["environment"]:
                project_params["environment"]["build_image"] = getattr(
                    aws_codebuild.LinuxBuildImage,
                    action_def["environment"].pop("build_image"),
                )
            if "compute_type" in action_def["environment"]:
                project_params["environment"]["compute_type"] = getattr(
                    aws_codebuild.ComputeType,
                    action_def["environment"].pop("compute_type"),
                )
            project_params["environment"].update(**action_def["environment"])
        project_role = aws_iam.Role(
            scope,
            f"{id}CodeBuildRole",
            path="/codebuild/",
            assumed_by=aws_iam.ServicePrincipal(
                service="codebuild.amazonaws.com"),
        )
        project_role.add_to_policy(
            aws_iam.PolicyStatement(actions=["*"],
                                    resources=["*"],
                                    effect=aws_iam.Effect.ALLOW))
        project_environment_variables = ({
            var_key: aws_codebuild.BuildEnvironmentVariable(
                value=str(var_value),
                type=aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
            )
            for var_key, var_value in
            action_def["environment_variables"].items()
            if "#" not in str(var_value)
        } if "environment_variables" in action_def else None)
        project = aws_codebuild.PipelineProject(
            scope,
            f"{id}Project",
            project_name=id,
            role=project_role,
            environment_variables=project_environment_variables,
            **project_params,
        )
        pipeline_environment_variables = ({
            var_key: aws_codebuild.BuildEnvironmentVariable(
                value=str(var_value),
                type=aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
            )
            for var_key, var_value in
            action_def["environment_variables"].items()
            if "#" in str(var_value)
        } if "environment_variables" in action_def else None)
        extra_inputs = ([
            aws_codepipeline.Artifact(input_)
            for input_ in action_def["extra_inputs"]
        ] if "extra_inputs" in action_def else None)
        outputs = ([
            aws_codepipeline.Artifact(output)
            for output in action_def["outputs"]
        ] if "outputs" in action_def else None)
        return aws_codepipeline_actions.CodeBuildAction(
            action_name=action_name,
            input=aws_codepipeline.Artifact(action_def["input"]),
            project=project,
            run_order=run_order,
            role=role,
            variables_namespace=variables_namespace,
            environment_variables=pipeline_environment_variables,
            extra_inputs=extra_inputs,
            outputs=outputs,
        )
    elif action_def["type"] == "CLOUDFORMATION":
        action_def = cast(CloudFormationCreateUpdateStackAction, action_def)
        return aws_codepipeline_actions.CloudFormationCreateUpdateStackAction(
            action_name=action_name,
            admin_permissions=False,
            stack_name=action_def["stack_name"],
            template_path=aws_codepipeline.ArtifactPath(
                aws_codepipeline.Artifact(action_def["input"]),
                action_def.get("template_path", "template.yaml"),
            ),
            capabilities=[
                # This lstrip does not support all possibilities, but is good enough for now
                aws_cloudformation.CloudFormationCapabilities[
                    capability.lstrip("CAPABILITY_")]
                for capability in action_def["capabilities"]
            ] if "capabilities" in action_def else None,
            deployment_role=role,
            role=role,
            parameter_overrides=action_def.get("parameter_overrides"),
            run_order=run_order,
            variables_namespace=variables_namespace,
        )
    elif action_def["type"] == "APPROVAL":
        action_def = cast(ApprovalAction, action_def)
        return aws_codepipeline_actions.ManualApprovalAction(
            action_name=action_name,
            run_order=run_order,
            role=role,
            additional_information=action_def.get("additional_information"),
            external_entity_link=action_def.get("external_entity_link"),
            notification_topic=action_def.get("notification_topic"),
            variables_namespace=variables_namespace,
        )
    elif action_def["type"] == "LAMBDA":
        action_def = cast(LambdaInvokeAction, action_def)
        user_parameters = action_def.get("user_parameters")
        return aws_codepipeline_actions.LambdaInvokeAction(
            action_name=action_name,
            run_order=run_order,
            lambda_=aws_lambda.Function.from_function_arn(
                scope, f"{id}Lambda", action_def["function_arn"]),
            user_parameters=user_parameters,
            role=role,
            variables_namespace=variables_namespace,
        )
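The factory dispatches on action_def["type"]. A sketch of the kind of definition it consumes, assuming the dict shapes implied by the casts above (repository and artifact names are illustrative):

    # Hypothetical input: a CodeCommit source action. "name" is popped by
    # the factory; the remaining keys are read via .get()/[] as shown above.
    source_action = create_action(
        scope=pipeline_stack,  # an existing core.Construct
        id="Source",
        action_def={
            "type": "CODECOMMIT",
            "name": "Source",
            "repository": "my-repo",
            "branch": "main",
            "output": "SourceOutput",
        },
    )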
Example #15
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 runnerrole: iam.IRole, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        clusterAdmin = iam.Role(self,
                                "AdminRole",
                                assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self, 'ekscdkdemo', vpc=vpc, default_capacity=0)

        asg_worker_nodes = cluster.add_capacity(
            'eksspot-cdkdemo',
            spot_price="0.0544",
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=2,
            bootstrap_options=eks.BootstrapOptions(
                docker_config_json=read_docker_daemon_resource(
                    'eksbaseresource/docker-daemon.json')))

        alb_rbac = eks.KubernetesResource(
            self,
            'alb-rbac',
            cluster=cluster,
            manifest=read_k8s_resource('eksbaseresource/alb-rbac.yml'))

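        # Note: "iampolicy" here and "dnspolicy" below are PolicyStatement
        # objects defined elsewhere in the original project; they are not
        # shown in this excerpt.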
        asg_worker_nodes.add_to_role_policy(iampolicy)
        cluster.aws_auth.add_masters_role(clusterAdmin)
        cluster.aws_auth.add_masters_role(runnerrole)

        service_account = cluster.add_service_account("external-dns-sa",
                                                      name='external-dns-sa')

        wellnessuser_irsa = cluster.add_service_account("wellnessuser",
                                                        name='wellnessuser')

        service_account.add_to_principal_policy(dnspolicy)

        deployment = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                },
                "name": "alb-ingress-controller",
                "namespace": "kube-system"
            },
            "spec": {
                "selector": {
                    "matchLabels": {
                        "app.kubernetes.io/name": "alb-ingress-controller"
                    }
                },
                "template": {
                    "metadata": {
                        "labels": {
                            "app.kubernetes.io/name": "alb-ingress-controller"
                        }
                    },
                    "spec": {
                        "containers": [{
                            "name":
                            "alb-ingress-controller",
                            "args": [
                                "--ingress-class=alb",
                                "--cluster-name=" + cluster.cluster_name
                            ],
                            "image":
                            "docker.io/amazon/aws-alb-ingress-controller:v1.1.8"
                        }],
                        "serviceAccountName":
                        "alb-ingress-controller"
                    }
                }
            }
        }
        alb_service = cluster.add_resource('alb-ingress-controller',
                                           deployment)
        external_dns = eks.KubernetesResource(
            self,
            'external-dns',
            cluster=cluster,
            manifest=read_k8s_resource('eksbaseresource/external-dns.yml'))
        alb_service.node.add_dependency(alb_rbac)
        external_dns.node.add_dependency(service_account)
        core.CfnOutput(self,
                       'ClusterAdmin_Role_ARN',
                       value=clusterAdmin.role_arn)
        core.CfnOutput(
            self,
            'Getupdateeks',
            value="aws eks update-kubeconfig --name " + cluster.cluster_name +
            " --region ap-northeast-1 --role-arn " + clusterAdmin.role_arn)

        wellness_kns_stream = kinesis.Stream(
            self,
            'WellnessKnsStream',
            retention_period=core.Duration.hours(24),
            shard_count=1,
            stream_name='event.member.appointment.devInfo')

        wellness_kns_stream.grant_read_write(wellnessuser_irsa)

        core.CfnOutput(self,
                       'kinesis_stream_arn',
                       value=wellness_kns_stream.stream_arn)

        core.CfnOutput(self,
                       'kinesis_stream_name',
                       value=wellness_kns_stream.stream_name)
Example #16
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below:
        # Create SNS Topic
        self.store_events_topic = _sns.Topic(
            self,
            "dataTopic",
            display_name="MessageFilterDemoTopic",
            topic_name=f"store_events_{construct_id}_topic")

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_sns_producer_stack/lambda_src/stream_data_producer.py",
                    encoding="utf-8",
                    mode="r",
            ) as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "streamDataProducerFn",
            function_name=f"data_producer_{construct_id}",
            description="Produce streaming data events and push to SNS stream",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=cdk.Duration.seconds(1),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production",
                "MAX_MSGS_TO_PRODUCE": "5",
                "TOPIC_ARN": f"{self.store_events_topic.topic_arn}",
                "TRIGGER_RANDOM_DELAY": "True"
            },
        )

        # Grant our Lambda Producer privileges to write to SNS Data Stream
        self.store_events_topic.grant_publish(data_producer_fn)

        data_producer_fn_version = data_producer_fn.latest_version
        data_producer_fn_version_alias = _lambda.Alias(
            self,
            "streamDataProducerFnAlias",
            alias_name="MystiqueAutomation",
            version=data_producer_fn_version,
        )

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "streamDataProducerFnLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY,
        )

        # Restrict the producer Lambda to be invoked only from the stack owner account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=cdk.Aws.ACCOUNT_ID,
            source_arn=self.store_events_topic.topic_arn)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page.",
        )

        output_1 = cdk.CfnOutput(
            self,
            "StoreOrdersEventsProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={cdk.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce streaming data events and push to SNS Topic.",
        )
Example #17
    def __init__(self, scope: Construct, construct_id: str,
                 **kwargs: str) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            "aws-data-wrangler-vpc",
            cidr="11.19.224.0/19",
            enable_dns_hostnames=True,
            enable_dns_support=True,
        )
        Tags.of(self.vpc).add("Name", "aws-data-wrangler")
        self.key = kms.Key(
            self,
            id="aws-data-wrangler-key",
            description="Aws Data Wrangler Test Key.",
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    sid="Enable IAM User Permissions",
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=["*"],
                )
            ]),
        )
        kms.Alias(
            self,
            "aws-data-wrangler-key-alias",
            alias_name="alias/aws-data-wrangler-key",
            target_key=self.key,
        )
        self.bucket = s3.Bucket(
            self,
            id="aws-data-wrangler",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            lifecycle_rules=[
                s3.LifecycleRule(
                    id="CleaningUp",
                    enabled=True,
                    expiration=Duration.days(1),
                    abort_incomplete_multipart_upload_after=Duration.days(1),
                ),
            ],
            versioned=True,
        )
        glue_db = glue.Database(
            self,
            id="aws_data_wrangler_glue_database",
            database_name="aws_data_wrangler",
            location_uri=f"s3://{self.bucket.bucket_name}",
        )
        log_group = logs.LogGroup(
            self,
            id="aws_data_wrangler_log_group",
            retention=logs.RetentionDays.ONE_MONTH,
        )
        log_stream = logs.LogStream(
            self,
            id="aws_data_wrangler_log_stream",
            log_group=log_group,
        )
        CfnOutput(self, "Region", value=self.region)
        CfnOutput(
            self,
            "VPC",
            value=self.vpc.vpc_id,
            export_name="aws-data-wrangler-base-VPC",
        )
        CfnOutput(
            self,
            "PublicSubnet1",
            value=self.vpc.public_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet1",
        )
        CfnOutput(
            self,
            "PublicSubnet2",
            value=self.vpc.public_subnets[1].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet2",
        )
        CfnOutput(
            self,
            "PrivateSubnet",
            value=self.vpc.private_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PrivateSubnet",
        )
        CfnOutput(
            self,
            "KmsKeyArn",
            value=self.key.key_arn,
            export_name="aws-data-wrangler-base-KmsKeyArn",
        )
        CfnOutput(
            self,
            "BucketName",
            value=self.bucket.bucket_name,
            export_name="aws-data-wrangler-base-BucketName",
        )
        CfnOutput(self, "GlueDatabaseName", value=glue_db.database_name)
        CfnOutput(self, "LogGroupName", value=log_group.log_group_name)
        CfnOutput(self, "LogStream", value=log_stream.log_stream_name)
Example #18
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)
        self.access_point = f"arn:aws:s3:{Aws.REGION}:{Aws.ACCOUNT_ID}:accesspoint/" \
                            f"{S3_ACCESS_POINT_NAME}"

        # Set up a bucket
        bucket = s3.Bucket(
            self,
            "example-bucket",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        # Delegating access control to access points
        # https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-policies.html
        bucket.add_to_resource_policy(
            iam.PolicyStatement(
                actions=["*"],
                principals=[iam.AnyPrincipal()],
                resources=[bucket.bucket_arn,
                           bucket.arn_for_objects('*')],
                conditions={
                    "StringEquals": {
                        "s3:DataAccessPointAccount": f"{Aws.ACCOUNT_ID}"
                    }
                }))

        # lambda to process our objects during retrieval
        retrieve_transformed_object_lambda = _lambda.Function(
            self,
            "retrieve_transformed_obj_lambda",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="index.handler",
            code=_lambda.Code.from_asset(
                "lambda/retrieve_transformed_object_lambda"))

        # Object lambda s3 access
        retrieve_transformed_object_lambda.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["*"],
                actions=["s3-object-lambda:WriteGetObjectResponse"]))
        # Restrict Lambda to be invoked from own account
        retrieve_transformed_object_lambda.add_permission(
            "invocationRestriction",
            action="lambda:InvokeFunction",
            principal=iam.AccountRootPrincipal(),
            source_account=Aws.ACCOUNT_ID)

        # Associate Bucket's access point with lambda get access
        policy_doc = iam.PolicyDocument()
        policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            principals=[
                iam.ArnPrincipal(
                    retrieve_transformed_object_lambda.role.role_arn)
            ],
            resources=[f"{self.access_point}/object/*"])
        policy_statement.sid = "AllowLambdaToUseAccessPoint"
        policy_doc.add_statements(policy_statement)

        example_bucket_ap = s3.CfnAccessPoint(self,
                                              "example-bucket_ap",
                                              bucket=bucket.bucket_name,
                                              name=S3_ACCESS_POINT_NAME,
                                              policy=policy_doc)

        # Access point to receive GET request and use lambda to process objects
        object_lambda_ap = s3_object_lambda.CfnAccessPoint(
            self,
            "s3_object_lambda_ap",
            name=OBJECT_LAMBDA_ACCESS_POINT_NAME,
            object_lambda_configuration=s3_object_lambda.CfnAccessPoint.
            ObjectLambdaConfigurationProperty(
                supporting_access_point=self.access_point,
                transformation_configurations=[
                    s3_object_lambda.CfnAccessPoint.
                    TransformationConfigurationProperty(
                        actions=["GetObject"],
                        content_transformation={
                            "AwsLambda": {
                                "FunctionArn":
                                f"{retrieve_transformed_object_lambda.function_arn}"
                            }
                        })
                ]))

        CfnOutput(self, "exampleBucketArn", value=bucket.bucket_arn)
        CfnOutput(self,
                  "objectLambdaArn",
                  value=retrieve_transformed_object_lambda.function_arn)
        CfnOutput(self,
                  "objectLambdaAccessPointArn",
                  value=object_lambda_ap.attr_arn)
        CfnOutput(
            self,
            "objectLambdaAccessPointUrl",
            value=f"https://console.aws.amazon.com/s3/olap/{Aws.ACCOUNT_ID}/"
            f"{OBJECT_LAMBDA_ACCESS_POINT_NAME}?region={Aws.REGION}")
Ejemplo n.º 19
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
        self.eks_vpc = eks_vpc

        # Create IAM Role For code-server bastion
        bastion_role = iam.Role(
            self,
            "BastionRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.AccountRootPrincipal()),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])
        self.bastion_role = bastion_role
        # Create EC2 Instance Profile for that Role
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[bastion_role.role_name])

        # Create SecurityGroup for the Control Plane ENIs
        eks_security_group = ec2.SecurityGroup(self,
                                               "EKSSecurityGroup",
                                               vpc=eks_vpc,
                                               allow_all_outbound=True)

        eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                            ec2.Port.all_traffic())

        # Create an EKS Cluster
        eks_cluster = eks.Cluster(
            self,
            "cluster",
            cluster_name="cluster",
            vpc=eks_vpc,
            masters_role=bastion_role,
            default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=2,
            security_group=eks_security_group,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            version=eks.KubernetesVersion.V1_17)
        self.cluster_cert = eks_cluster.cluster_certificate_authority_data

        # Deploy ALB Ingress Controller
        # Create the k8s Service account and corresponding IAM Role mapped via IRSA
        alb_service_account = eks_cluster.add_service_account(
            "alb-ingress-controller",
            name="alb-ingress-controller",
            namespace="kube-system")

        # Create the PolicyStatements to attach to the role
        # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these
        alb_policy_statement_json_1 = {
            "Effect":
            "Allow",
            "Action": [
                "acm:DescribeCertificate", "acm:ListCertificates",
                "acm:GetCertificate"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_2 = {
            "Effect":
            "Allow",
            "Action": [
                "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup",
                "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup",
                "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses",
                "ec2:DescribeInstances", "ec2:DescribeInstanceStatus",
                "ec2:DescribeInternetGateways",
                "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups",
                "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs",
                "ec2:ModifyInstanceAttribute",
                "ec2:ModifyNetworkInterfaceAttribute",
                "ec2:RevokeSecurityGroupIngress"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_3 = {
            "Effect":
            "Allow",
            "Action": [
                "elasticloadbalancing:AddListenerCertificates",
                "elasticloadbalancing:AddTags",
                "elasticloadbalancing:CreateListener",
                "elasticloadbalancing:CreateLoadBalancer",
                "elasticloadbalancing:CreateRule",
                "elasticloadbalancing:CreateTargetGroup",
                "elasticloadbalancing:DeleteListener",
                "elasticloadbalancing:DeleteLoadBalancer",
                "elasticloadbalancing:DeleteRule",
                "elasticloadbalancing:DeleteTargetGroup",
                "elasticloadbalancing:DeregisterTargets",
                "elasticloadbalancing:DescribeListenerCertificates",
                "elasticloadbalancing:DescribeListeners",
                "elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeRules",
                "elasticloadbalancing:DescribeSSLPolicies",
                "elasticloadbalancing:DescribeTags",
                "elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTargetGroupAttributes",
                "elasticloadbalancing:DescribeTargetHealth",
                "elasticloadbalancing:ModifyListener",
                "elasticloadbalancing:ModifyLoadBalancerAttributes",
                "elasticloadbalancing:ModifyRule",
                "elasticloadbalancing:ModifyTargetGroup",
                "elasticloadbalancing:ModifyTargetGroupAttributes",
                "elasticloadbalancing:RegisterTargets",
                "elasticloadbalancing:RemoveListenerCertificates",
                "elasticloadbalancing:RemoveTags",
                "elasticloadbalancing:SetIpAddressType",
                "elasticloadbalancing:SetSecurityGroups",
                "elasticloadbalancing:SetSubnets",
                "elasticloadbalancing:SetWebAcl"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_4 = {
            "Effect":
            "Allow",
            "Action": [
                "iam:CreateServiceLinkedRole", "iam:GetServerCertificate",
                "iam:ListServerCertificates"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_5 = {
            "Effect": "Allow",
            "Action": ["cognito-idp:DescribeUserPoolClient"],
            "Resource": "*"
        }
        alb_policy_statement_json_6 = {
            "Effect":
            "Allow",
            "Action": [
                "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL",
                "waf-regional:AssociateWebACL",
                "waf-regional:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_7 = {
            "Effect": "Allow",
            "Action": ["tag:GetResources", "tag:TagResources"],
            "Resource": "*"
        }
        alb_policy_statement_json_8 = {
            "Effect": "Allow",
            "Action": ["waf:GetWebACL"],
            "Resource": "*"
        }
        alb_policy_statement_json_9 = {
            "Effect":
            "Allow",
            "Action": [
                "wafv2:GetWebACL", "wafv2:GetWebACLForResource",
                "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_10 = {
            "Effect":
            "Allow",
            "Action": [
                "shield:DescribeProtection", "shield:GetSubscriptionState",
                "shield:DeleteProtection", "shield:CreateProtection",
                "shield:DescribeSubscription", "shield:ListProtections"
            ],
            "Resource":
            "*"
        }

        # Attach the necessary permissions
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_1))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_2))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_3))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_4))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_5))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_6))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_7))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_8))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_9))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_10))
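        # (The ten attachments above could equally be driven from a list, e.g.
        #    for statement_json in [alb_policy_statement_json_1, ...,
        #                           alb_policy_statement_json_10]:
        #        alb_service_account.add_to_policy(
        #            iam.PolicyStatement.from_json(statement_json))
        #  which avoids the repetition without needing a PolicyDocument.)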

        # Deploy the ALB Ingress Controller from the Helm chart
        eks_cluster.add_helm_chart(
            "aws-alb-ingress-controller",
            chart="aws-alb-ingress-controller",
            repository=
            "http://storage.googleapis.com/kubernetes-charts-incubator",
            namespace="kube-system",
            values={
                "clusterName": "cluster",
                "awsRegion": os.environ["CDK_DEFAULT_REGION"],
                "awsVpcID": eks_vpc.vpc_id,
                "rbac": {
                    "create": True,
                    "serviceAccount": {
                        "create": False,
                        "name": "alb-ingress-controller"
                    }
                }
            })

        # Create code-server bastion
        # Get Latest Amazon Linux AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Create SecurityGroup for code-server
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=eks_vpc,
                                           allow_all_outbound=True)

        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(8080))

        # Create our EC2 instance running CodeServer
        code_server_instance = ec2.Instance(
            self,
            "CodeServerInstance",
            instance_type=ec2.InstanceType("t3.large"),
            machine_image=amzn_linux,
            role=bastion_role,
            vpc=eks_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=security_group,
            block_devices=[
                ec2.BlockDevice(device_name="/dev/xvda",
                                volume=ec2.BlockDeviceVolume.ebs(20))
            ])

        # Add UserData
        code_server_instance.user_data.add_commands(
            "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server")
        code_server_instance.user_data.add_commands(
            "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz"
        )
        code_server_instance.user_data.add_commands(
            "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0"
        )
        code_server_instance.user_data.add_commands(
            "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server"
        )
        code_server_instance.user_data.add_commands(
            "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"auth: password\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"cert: false\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "~/.local/bin/code-server &")
        code_server_instance.user_data.add_commands(
            "yum -y install jq gettext bash-completion moreutils")
        code_server_instance.user_data.add_commands(
            "sudo pip install --upgrade awscli && hash -r")
        code_server_instance.user_data.add_commands(
            "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\""
        )
        code_server_instance.user_data.add_commands(
            "chmod +x /usr/local/bin/kubectl")
        code_server_instance.user_data.add_commands(
            "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash"
        )
        code_server_instance.user_data.add_commands(
            "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)"
        )
        code_server_instance.user_data.add_commands(
            "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "aws configure set default.region ${AWS_REGION}")
        code_server_instance.user_data.add_commands(
            "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -"
        )
        code_server_instance.user_data.add_commands("yum -y install nodejs")
        code_server_instance.user_data.add_commands(
            "amazon-linux-extras enable python3")
        code_server_instance.user_data.add_commands(
            "yum install -y python3 --disablerepo amzn2-core")
        code_server_instance.user_data.add_commands("yum install -y git")
        code_server_instance.user_data.add_commands(
            "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip"
        )
        code_server_instance.user_data.add_commands("npm install -g aws-cdk")
        code_server_instance.user_data.add_commands(
            "echo 'export KUBECONFIG=~/.kube/config' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "git clone https://github.com/jasonumiker/eks-school.git")

        # Add ALB
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=eks_vpc,
                                           internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listener.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listener.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])
Ejemplo n.º 20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(
            self, "VPC",
            cidr="10.0.0.0/16"
        )

        # Create IAM Role For EC2 bastion instance to be able to manage the cluster
        bastion_role = iam.Role(
            self, "BastionRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.AccountRootPrincipal()
            )
        )
        self.bastion_role = bastion_role
        # Create EC2 Instance Profile for that Role
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile",
            roles=[bastion_role.role_name]            
        )

        # Create SecurityGroup for the Control Plane ENIs
        eks_security_group = ec2.SecurityGroup(
            self, "EKSSecurityGroup",
            vpc=eks_vpc,
            allow_all_outbound=True
        )
        
        eks_security_group.add_ingress_rule(
            ec2.Peer.ipv4('10.0.0.0/16'),
            ec2.Port.all_traffic()
        )    

        # Create an EKS Cluster
        eks_cluster = eks.Cluster(
            self, "cluster",
            vpc=eks_vpc,
            masters_role=bastion_role,
            default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=2,
            security_group=eks_security_group,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            version=eks.KubernetesVersion.V1_18
        )

        # Deploy ALB Ingress Controller
        # Create the k8s Service account and corresponding IAM Role mapped via IRSA
        alb_service_account = eks_cluster.add_service_account(
            "aws-load-balancer-controller",
            name="aws-load-balancer-controller",
            namespace="kube-system"
        )

        # Create the PolicyStatements to attach to the role
        # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these
        alb_policy_statement_json_1 = {
            "Effect": "Allow",
            "Action": [
                "acm:DescribeCertificate",
                "acm:ListCertificates",
                "acm:GetCertificate"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_2 = {
            "Effect": "Allow",
            "Action": [
                "ec2:AuthorizeSecurityGroupIngress",
                "ec2:CreateSecurityGroup",
                "ec2:CreateTags",
                "ec2:DeleteTags",
                "ec2:DeleteSecurityGroup",
                "ec2:DescribeAccountAttributes",
                "ec2:DescribeAddresses",
                "ec2:DescribeInstances",
                "ec2:DescribeInstanceStatus",
                "ec2:DescribeInternetGateways",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeSubnets",
                "ec2:DescribeTags",
                "ec2:DescribeVpcs",
                "ec2:ModifyInstanceAttribute",
                "ec2:ModifyNetworkInterfaceAttribute",
                "ec2:RevokeSecurityGroupIngress"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_3 = {
            "Effect": "Allow",
            "Action": [
                "elasticloadbalancing:AddListenerCertificates",
                "elasticloadbalancing:AddTags",
                "elasticloadbalancing:CreateListener",
                "elasticloadbalancing:CreateLoadBalancer",
                "elasticloadbalancing:CreateRule",
                "elasticloadbalancing:CreateTargetGroup",
                "elasticloadbalancing:DeleteListener",
                "elasticloadbalancing:DeleteLoadBalancer",
                "elasticloadbalancing:DeleteRule",
                "elasticloadbalancing:DeleteTargetGroup",
                "elasticloadbalancing:DeregisterTargets",
                "elasticloadbalancing:DescribeListenerCertificates",
                "elasticloadbalancing:DescribeListeners",
                "elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeRules",
                "elasticloadbalancing:DescribeSSLPolicies",
                "elasticloadbalancing:DescribeTags",
                "elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTargetGroupAttributes",
                "elasticloadbalancing:DescribeTargetHealth",
                "elasticloadbalancing:ModifyListener",
                "elasticloadbalancing:ModifyLoadBalancerAttributes",
                "elasticloadbalancing:ModifyRule",
                "elasticloadbalancing:ModifyTargetGroup",
                "elasticloadbalancing:ModifyTargetGroupAttributes",
                "elasticloadbalancing:RegisterTargets",
                "elasticloadbalancing:RemoveListenerCertificates",
                "elasticloadbalancing:RemoveTags",
                "elasticloadbalancing:SetIpAddressType",
                "elasticloadbalancing:SetSecurityGroups",
                "elasticloadbalancing:SetSubnets",
                "elasticloadbalancing:SetWebAcl"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_4 = {
            "Effect": "Allow",
            "Action": [
                "iam:CreateServiceLinkedRole",
                "iam:GetServerCertificate",
                "iam:ListServerCertificates"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_5 = {
            "Effect": "Allow",
            "Action": [
                "cognito-idp:DescribeUserPoolClient"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_6 = {
            "Effect": "Allow",
            "Action": [
                "waf-regional:GetWebACLForResource",
                "waf-regional:GetWebACL",
                "waf-regional:AssociateWebACL",
                "waf-regional:DisassociateWebACL"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_7 = {
            "Effect": "Allow",
            "Action": [
                "tag:GetResources",
                "tag:TagResources"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_8 = {
            "Effect": "Allow",
            "Action": [
                "waf:GetWebACL"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_9 = {
            "Effect": "Allow",
            "Action": [
                "wafv2:GetWebACL",
                "wafv2:GetWebACLForResource",
                "wafv2:AssociateWebACL",
                "wafv2:DisassociateWebACL"
            ],
            "Resource": "*"
        }
        alb_policy_statement_json_10 = {
            "Effect": "Allow",
            "Action": [
                "shield:DescribeProtection",
                "shield:GetSubscriptionState",
                "shield:DeleteProtection",
                "shield:CreateProtection",
                "shield:DescribeSubscription",
                "shield:ListProtections"
            ],
            "Resource": "*"
        }
        
        # Attach the necessary permissions
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_1))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_2))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_3))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_4))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_5))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_6))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_7))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_8))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_9))
        alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_10))

        # Deploy the ALB Ingress Controller from the Helm chart
        eks_cluster.add_helm_chart(
            "aws-load-balancer-controller",
            chart="aws-load-balancer-controller",
            repository="https://aws.github.io/eks-charts",
            namespace="kube-system",
            values={
                "clusterName": eks_cluster.cluster_name,
                "region": self.region,
                "vpcId": eks_vpc.vpc_id,
                "serviceAccount": {
                    "create": False,
                    "name": "aws-load-balancer-controller"
                }
            }
        )

        # Deploy External DNS Controller
        # Create the k8s Service account and corresponding IAM Role mapped via IRSA
        externaldns_service_account = eks_cluster.add_service_account(
            "external-dns",
            name="external-dns",
            namespace="kube-system"
        )

        # Create the PolicyStatements to attach to the role
        externaldns_policy_statement_json_1 = {
            "Effect": "Allow",
            "Action": [
                "route53:ChangeResourceRecordSets"
            ],
            "Resource": [
                "arn:aws:route53:::hostedzone/*"
            ]
        }
        externaldns_policy_statement_json_2 = {
            "Effect": "Allow",
            "Action": [
                "route53:ListHostedZones",
                "route53:ListResourceRecordSets"
            ],
            "Resource": [
                "*"
            ]
        }

        # Add the policies to the service account
        externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_1))
        externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_2))

        # Deploy the Helm Chart
        eks_cluster.add_helm_chart(
            "external-dns",
            chart="external-dns",
            repository="https://charts.bitnami.com/bitnami",
            namespace="kube-system",
            values={
                "provider": "aws",
                "aws": {
                    "region": self.region
                },
                "serviceAccount": {
                    "create": False,
                    "name": "external-dns"
                },
                "podSecurityContext": {
                    "fsGroup": 65534
                }
            }
        )    
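        # Note: the fsGroup of 65534 above lets the non-root external-dns
        # container read the projected IRSA web identity token file that EKS
        # mounts into the pod.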

        # Install external secrets controller
        # Create the Service Account
        externalsecrets_service_account = eks_cluster.add_service_account(
            "kubernetes-external-secrets",
            name="kubernetes-external-secrets",
            namespace="kube-system"
        )

        # Define the policy in JSON
        externalsecrets_policy_statement_json_1 = {
            "Effect": "Allow",
            "Action": [
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds"
            ],
            "Resource": [
                "*"
            ]
        }

        # Add the policies to the service account
        externalsecrets_service_account.add_to_policy(iam.PolicyStatement.from_json(externalsecrets_policy_statement_json_1))

        # Deploy the Helm Chart
        eks_cluster.add_helm_chart(
            "external-secrets",
            chart="kubernetes-external-secrets",            
            repository="https://external-secrets.github.io/kubernetes-external-secrets/",
            namespace="kube-system",
            values={
                "env": {
                    "AWS_REGION": self.region
                },
                "serviceAccount": {
                    "name": "kubernetes-external-secrets",
                    "create": False
                },
                "securityContext": {
                    "fsGroup": 65534
                }
            }
        )

        # Deploy Flux
        # Deploy the Helm Chart
        eks_cluster.add_helm_chart(
            "flux",
            chart="flux",
            repository="https://charts.fluxcd.io",
            namespace="kube-system",
            values={
                "git": {
                    "url": "[email protected]:jasonumiker/k8s-plus-aws-gitops",
                    "path": "k8s-app-resources",
                    "branch": "master"
                }
            }
        )

        # Deploy Prometheus and Grafana
        # TODO Replace this with the new AWS Managed Prometheus and Grafana when available
        eks_cluster.add_helm_chart(
            "metrics",
            chart="kube-prometheus-stack",
            repository="https://prometheus-community.github.io/helm-charts",
            namespace="monitoring",
            values={
                "prometheus": {
                    "prometheusSpec": {
                    "storageSpec": {
                        "volumeClaimTemplate": {
                        "spec": {
                            "accessModes": [
                            "ReadWriteOnce"
                            ],
                            "resources": {
                            "requests": {
                                "storage": "8Gi"
                            }
                            },
                            "storageClassName": "gp2"
                        }
                        }
                    }
                    }
                },
                "alertmanager": {
                    "alertmanagerSpec": {
                    "storage": {
                        "volumeClaimTemplate": {
                        "spec": {
                            "accessModes": [
                            "ReadWriteOnce"
                            ],
                            "resources": {
                            "requests": {
                                "storage": "2Gi"
                            }
                            },
                            "storageClassName": "gp2"
                        }
                        }
                    }
                    }
                },
                "grafana": {
                    "persistence": {
                        "enabled": "true",
                        "storageClassName": "gp2"
                    }
                }
            }          
        )

        # Deploy Fluentbit and Elasticsearch
        # Deploy an ElasticSearch Domain
        es_domain = es.Domain(
            self, "ESDomain",
            version=es.ElasticsearchVersion.V7_9
        )
        # Create the Service Account
        fluentbit_service_account = eks_cluster.add_service_account(
            "fluentbit",
            name="fluentbit",
            namespace="monitoring"
        )

        # Define the policy in JSON
        fluentbit_policy_statement_json_1 = {
            "Effect": "Allow",
            "Action": [
                "es:ESHttp*"
            ],
            "Resource": [
                es_domain.domain_arn
            ]
        }

        # Add the policies to the service account
        fluentbit_service_account.add_to_policy(iam.PolicyStatement.from_json(fluentbit_policy_statement_json_1))

        # Grant fluentbit access to our ES Domain
        es_domain.grant_write(fluentbit_service_account)

        eks_cluster.add_helm_chart(
            "fluent-bit",
            chart="fluent-bit",
            repository="https://fluent.github.io/helm-charts",
            namespace="monitoring",
            values={
                "serviceAccount": {
                    "create": False,
                    "name": "fluentbit"
                },
                "config": {
                    "outputs": "[OUTPUT]\n    Name            es\n    Match           *\n    Host            "+es_domain.domain_endpoint+"\n    Port            443\n    TLS             On\n    AWS_Auth        On\n    AWS_Region      "+self.region+"\n    Retry_Limit     6\n",
                }
            }
        )    
Ejemplo n.º 21
0
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        stack_log_level: str,
        max_msg_receive_cnt: int,
        reliable_queue,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Read Lambda Code
        try:
            with open("stacks/back_end/serverless_sqs_consumer_stack/lambda_src/sqs_data_consumer.py",
                      encoding="utf-8",
                      mode="r"
                      ) as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name=f"queue_consumer_fn_{construct_id}",
            description="Process messages in SQS queue",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(
                msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "RELIABLE_QUEUE_NAME": f"{reliable_queue.queue_name}",
                "TRIGGER_RANDOM_DELAY": "True"
            }
        )

        # Create custom LogGroup for the consumer
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY
        )

        # # Grant our Lambda Consumer privileges to READ from SQS
        # reliable_queue.grant_consume_messages(msg_consumer_fn)

        # Restrict the consumer Lambda to be invoked only from the stack owner account
        msg_consumer_fn.add_permission(
            "restrictLambdaInvocationToOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=core.Aws.ACCOUNT_ID,
            source_arn=reliable_queue.queue_arn
        )

        # Set our Lambda Function to be invoked by SQS
        msg_consumer_fn.add_event_source(
            _sqsEventSource(reliable_queue, batch_size=5))

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )

        output_2 = core.CfnOutput(
            self,
            "msgConsumer",
            value=f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process messages in SQS queue"
        )
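# For reference, a minimal hypothetical handler of the shape this stack loads
# from lambda_src/sqs_data_consumer.py (the real file is not shown here); SQS
# delivers up to batch_size records per invocation:
import json
import logging
import os

logger = logging.getLogger()
logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))


def lambda_handler(event, context):
    records = event.get("Records", [])
    for record in records:
        # A processing failure here makes the message visible again and
        # counts towards the queue's max receive count
        logger.info(json.dumps({"message_body": record["body"]}))
    return {"status": True, "processed": len(records)}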
Ejemplo n.º 22
0
    def __init__(self, stack: core.Stack, VPC: ec2.Vpc) -> None:

        cluster_admin = iam.Role(stack,
                                 "AdminRole",
                                 assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(stack,
                              "cluster",
                              default_capacity=1,
                              vpc=VPC,
                              masters_role=cluster_admin)

        #see https://github.com/kubernetes/kubernetes/issues/61486?#issuecomment-635169272
        eks.KubernetesPatch(
            stack,
            "patch",
            cluster=cluster,
            resource_name="daemonset/kube-proxy",
            resource_namespace="kube-system",
            apply_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy",
                                    "--v=2",
                                    "--hostname-override=$(NODE_NAME)",
                                    "--config=/var/lib/kube-proxy-config/config",
                                ],
                                "env": [{
                                    "name": "NODE_NAME",
                                    "valueFrom": {
                                        "fieldRef": {
                                            "apiVersion": "v1",
                                            "fieldPath": "spec.nodeName"
                                        }
                                    }
                                }]
                            }]
                        }
                    }
                }
            },
            restore_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy", "--v=2",
                                    "--config=/var/lib/kube-proxy-config/config"
                                ]
                            }]
                        }
                    }
                }
            })

        if stack.node.try_get_context("ingress"):
            print("Will deploy the Ingress stack")
            Ingress(cluster, stack)

        if stack.node.try_get_context("envoy_eks"):
            print("Will deploy the Envoy@EKS stack")
            Envoy(cluster, stack)
Ejemplo n.º 23
0
    def __init__(
        self,
        scope: core.Construct,
        stack_id: str,
        storage_bucket_parameter: aws_ssm.StringParameter,
        validation_results_table: Table,
        **kwargs: Any,
    ) -> None:
        super().__init__(scope, stack_id, **kwargs)

        if saml_provider_arn := environ.get("DATALAKE_SAML_IDENTITY_PROVIDER_ARN"):
            principal = aws_iam.FederatedPrincipal(
                federated=saml_provider_arn,
                assume_role_action="sts:AssumeRoleWithSAML",
                conditions={"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}},
            )

        else:
            principal = aws_iam.AccountPrincipal(  # type: ignore[assignment]
                account_id=aws_iam.AccountRootPrincipal().account_id
            )

        users_role = aws_iam.Role(
            self,
            "users-role",
            role_name=ResourceName.USERS_ROLE_NAME.value,
            assumed_by=principal,  # type: ignore[arg-type]
            max_session_duration=MAX_SESSION_DURATION,
        )

        read_only_role = aws_iam.Role(
            self,
            "linz-read-only-role",
            role_name=ResourceName.READ_ONLY_ROLE_NAME.value,
            assumed_by=principal,  # type: ignore[arg-type]
        )
Ejemplo n.º 24
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pvt_bkt = s3.Bucket(
            self,
            "abacBucket",
            versioned=True,
            # encryption=s3.BucketEncryption.KMS_MANAGED,
            block_public_access=s3.BlockPublicAccess(block_public_policy=True),
            removal_policy=core.RemovalPolicy.DESTROY
            )

        pvt_bkt.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # actions=["s3:GetObject"],
                actions=["s3:*"],
                # resources=[pvt_bkt.arn_for_objects("file.txt")],
                resources=[pvt_bkt.arn_for_objects("*")],
                principals=[iam.AccountRootPrincipal()]
            )
        )
        # Create 3 Users: 1 Admin & 2 Normal Users

        # Lets generate a password for our user
        redRosy_new_pass = random_string_generator(
            self,
            "redRosyNewPasswordGenerator",
            Length=20
        )

        redRosy = iam.User(
            self,
            "redRosy",
            user_name="redRosy",
            password=core.SecretValue.plain_text(redRosy_new_pass.response)
        )

        blueBob_new_pass = random_string_generator(
            self,
            "blueBobNewPasswordGenerator",
            Length=20
        )

        blueBob = iam.User(
            self,
            "blueBob",
            user_name="blueBob",
            password=core.SecretValue.plain_text(blueBob_new_pass.response)
        )

        annoyingAdmin_new_pass = random_string_generator(
            self,
            "annoyingAdminNewPasswordGenerator",
            Length=20
        )

        annoyingAdmin = iam.User(
            self,
            "annoyingAdmin",
            user_name="annoyingAdmin",
            password=core.SecretValue.plain_text(annoyingAdmin_new_pass.response)
        )

        teamUnicornGrp = iam.Group(
            self,
            "teamUnicorn",
            group_name="teamUnicorn"
        )

        # Add Users To Group
        teamUnicornGrp.add_user(redRosy)
        teamUnicornGrp.add_user(blueBob)
        teamUnicornGrp.add_user(annoyingAdmin)

        # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))
        ##############################################
        # We need a custom resource to TAG IAM Users #
        ##############################################

        iamUserTaggerResp = iam_user_tagger(
            self, "iamTagger",
            message=[
                {"user":redRosy.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectRed'}
                                                    ]
                },
                {"user":blueBob.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectBlue'}
                                                    ]
                },
                {"user":annoyingAdmin.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'teamAdmin','Value':'yes'}
                                                    ]
                }
            ]
        )
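        # Under the hood the custom resource presumably loops over "message"
        # and calls the IAM TagUser API, along the lines of:
        #    iam_client = boto3.client("iam")
        #    for entry in message:
        #        iam_client.tag_user(UserName=entry["user"], Tags=entry["tags"])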

        """
        resource = MyCustomResource(
            self, "iamTagger",
            message=[
                {"user":redRosy.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectRed'}
                                                    ]
                },
                {"user":blueBob.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectBlue'}
                                                    ]
                },
                {"user":annoyingAdmin.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'teamAdmin','Value':'yes'}
                                                    ]
                }
            ]
        )
        """

        # Lets Create the IAM Role
        # Uses belonging to this group, will be able to asume this role
        accountId=core.Aws.ACCOUNT_ID
        teamUnicornProjectRedRole = iam.Role(
            self,
            'teamUnicornProjectRedRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornProjectRedRole"
        )
        core.Tag.add(teamUnicornProjectRedRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornProjectRedRole, key="projectName",value="projectRed")

        teamUnicornProjectBlueRole = iam.Role(
            self,
            'teamUnicornProjectBlueRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornProjectBlueRole"
        )
        core.Tag.add(teamUnicornProjectBlueRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornProjectBlueRole, key="projectName",value="projectBlue")

        teamUnicornTeamAdminRole = iam.Role(
            self,
            'teamUnicornTeamAdminRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornTeamAdminRole"
        )
        core.Tag.add(teamUnicornTeamAdminRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornTeamAdminRole, key="teamAdmin",value="yes")

        # Allow Group to Assume Role
        grpStmt1=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[f"arn:aws:iam::{accountId}:role/teamUnicornProject*"],
                actions=["sts:AssumeRole"],
                conditions={ "StringEquals": { "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                                               "iam:ResourceTag/projectName": "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        grpStmt1.sid="AllowGroupMembersToAssumeRoleMatchingTeamName"

        grpStmt2=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[f"arn:aws:iam::{accountId}:role/teamUnicornTeamAdminRole"],
                actions=["sts:AssumeRole"],
                conditions={ "StringEquals": { "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                                               "iam:ResourceTag/teamAdmin": "yes"
                                            }
                        }
            )
        grpStmt2.sid="AllowTeamAdminToAssumeRoleMatchingTeamName"
        teamUnicornGrp.add_to_policy( grpStmt1 )
        teamUnicornGrp.add_to_policy( grpStmt2 )
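        # With these statements attached, a user whose principal tags match the
        # role's tags can switch into it; the equivalent CLI call (hypothetical
        # session name) would be:
        #    aws sts assume-role \
        #      --role-arn arn:aws:iam::<ACCOUNT_ID>:role/teamUnicornProjectRedRole \
        #      --role-session-name rosy-session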

        # Add Permissions to the Role
        roleStmt1=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["*"],
                actions=["s3:ListAllMyBuckets", "s3:HeadBucket"]
            )
        roleStmt1.sid="AllowGroupToSeeBucketListInTheConsole"

        roleStmt2=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.bucket_arn],
                actions=["s3:ListBucket","s3:ListBucketVersions"],
                # Below condition can be used to enable listing a particular prefix in another statement
                # conditions={ "StringEquals" : { "s3:prefix":[""], "s3:delimiter":["/"] } }
            )
        roleStmt2.sid="AllowRootLevelListingOfBucket"

        roleStmt3=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.arn_for_objects("*")],
                actions=["s3:Get*","s3:DeleteObjectTagging"],
                conditions={ "StringEquals": { "s3:ExistingObjectTag/teamName" : "${aws:PrincipalTag/teamName}",
                                               "s3:ExistingObjectTag/projectName" : "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        roleStmt3.sid="ReadOnlyAccessToTeams"

        roleStmt4=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.arn_for_objects("*")],
                actions=["s3:PutObject","s3:PutObjectTagging","s3:PutObjectVersionTagging"],
                conditions={ "StringEquals": { "s3:RequestObjectTag/teamName" : "${aws:PrincipalTag/teamName}",
                                               "s3:RequestObjectTag/projectName" : "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        roleStmt4.sid="WriteTaggedObjectOwnedByThem"

        roleStmt5=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.bucket_arn, pvt_bkt.arn_for_objects("*")],
                actions=["s3:*"],
                conditions={
                    "StringEquals": {
                        # Condition keys are bare names; the ${...} syntax is
                        # only substituted in values, not keys
                        "aws:PrincipalTag/teamAdmin": ["yes"]
                    }
                }
            )
        roleStmt5.sid="FullAccessToAdminsFromSameTeam"

        teamUnicornProjectRedRole.add_to_policy( roleStmt1 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt2 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt3 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt4 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt5 )

        # Add same permissions to projectBlueRole
        teamUnicornProjectBlueRole.add_to_policy( roleStmt1 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt2 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt3 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt4 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt5 )

        # Add same permissions to teamAdminRole
        teamUnicornTeamAdminRole.add_to_policy( roleStmt1 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt2 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt3 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt4 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt5 )


        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(self,
            "SecuirtyAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )

        output1_r = core.CfnOutput(self,
            "User:redRosy",
            value=redRosy_new_pass.response,
            description="Red Rosy User Password"
        )
        output1_b = core.CfnOutput(self,
            "User:blueBob",
            value=blueBob_new_pass.response,
            description="Blue Bob User Password"
        )
        output1_a = core.CfnOutput(self,
            "User:annoyingAdmin",
            value=annoyingAdmin_new_pass.response,
            description="Annoying Admin User Password"
        )

        output2 = core.CfnOutput(self,
            "SecurePrivateBucket",
            value=(
                    f"https://console.aws.amazon.com/s3/buckets/"
                    f"{pvt_bkt.bucket_name}"
                ),
            description=f"S3 Bucket to Test ABAC"
        )

        output3 = core.CfnOutput(self,
            "Rosy-Assume-RedRole-Url",
            value=(
                    f"https://signin.aws.amazon.com/switchrole?roleName="
                    f"{teamUnicornProjectRedRole.role_name}"
                    f"&account="
                    f"{core.Aws.ACCOUNT_ID}"
                ),
            description=f"The URL for Rosy to assume teamRed Role"
        )


        output4 = core.CfnOutput(self,
            "blueBob-Assume-RedRole-Url",
            value=(
                    f"https://signin.aws.amazon.com/switchrole?roleName="
                    f"{teamUnicornProjectBlueRole.role_name}"
                    f"&account="
                    f"{core.Aws.ACCOUNT_ID}"
                ),
            description=f"The URL for Bob to assume teamBlue Role"
        )

        output5 = core.CfnOutput(self,
            "SampleS3UploadCommands",
            value=(
                    f"aws s3api put-object-tagging --bucket {pvt_bkt.bucket_name} --key YOUR-OBJECT --tagging 'TagSet=[{{Key=projectName,Value=projectRed}}]'"
                ),
            description="Sample object-tagging command for projectRed"
        )

        output10 = core.CfnOutput(self,
            "User-Login-Url",
            value=(
                    f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
                ),
            description="The IAM user console sign-in URL for this account"
        )
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, orders_bus, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_eventbridge_consumer_stack/lambda_src/eventbridge_data_consumer.py",
                    encoding="utf-8",
                    mode="r") as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name=f"events_consumer_fn",
            description="Process messages in EventBridge queue",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production"
            })

        msg_consumer_fn_version = msg_consumer_fn.latest_version
        msg_consumer_fn_version_alias = _lambda.Alias(
            self,
            "msgConsumerFnAlias",
            alias_name="MystiqueAutomation",
            version=msg_consumer_fn_version)

        # Create custom LogGroup for the consumer
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the consumer Lambda to be invoked only from the stack owner account
        msg_consumer_fn.add_permission("restrictLambdaInvocationToOwnAccount",
                                       principal=_iam.AccountRootPrincipal(),
                                       action="lambda:InvokeFunction",
                                       source_account=core.Aws.ACCOUNT_ID,
                                       source_arn=orders_bus.event_bus_arn)

        # Event Pattern
        self.orders_pattern = _evnts.EventPattern(detail_type=["sales-events"])

        # EventBridge Routing Rule
        self.orders_routing = _evnts.Rule(
            self,
            f"ordersEventRoutingRule01",
            description="A simple events routing rule",
            enabled=True,
            event_bus=orders_bus,
            event_pattern=self.orders_pattern,
            rule_name="orders_routing_to_consumer",
            targets=[_evnts_tgt.LambdaFunction(handler=msg_consumer_fn)])

        self.orders_routing.apply_removal_policy(core.RemovalPolicy.DESTROY)
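        # A hypothetical producer-side sketch (not part of this stack): events
        # published with a matching detail-type are routed to the consumer:
        #    events_client = boto3.client("events")
        #    events_client.put_events(Entries=[{
        #        "EventBusName": "orders-bus",  # assumed bus name
        #        "Source": "sales.app",         # assumed source
        #        "DetailType": "sales-events",
        #        "Detail": json.dumps({"order_id": 1}),
        #    }])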


        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_2 = core.CfnOutput(
            self,
            "msgConsumer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process events received from eventbridge event bus")
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Maximum number of times a message can be received from the queue before being moved to the dead-letter queue
        self.max_msg_receive_cnt = 5
        self.max_msg_receive_cnt_at_retry = 3

        # Define Dead Letter Queue
        self.reliable_q_dlq = _sqs.Queue(
            self,
            "DeadLetterQueue",
            delivery_delay=core.Duration.seconds(100),
            queue_name=f"reliable_q_dlq",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10))

        # Define Retry Queue for Reliable Q
        self.reliable_q_retry_1 = _sqs.Queue(
            self,
            "reliableQueueRetry1",
            delivery_delay=core.Duration.seconds(10),
            queue_name=f"reliable_q_retry_1",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=self.max_msg_receive_cnt_at_retry,
                queue=self.reliable_q_dlq))

        # Primary Source Queue
        self.reliable_q = _sqs.Queue(
            self,
            "reliableQueue",
            delivery_delay=core.Duration.seconds(5),
            queue_name=f"reliable_q",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=self.max_msg_receive_cnt,
                queue=self.reliable_q_retry_1))
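
        # How the redrive chain behaves: a message that is received but never
        # deleted 5 times (max_receive_count) on reliable_q moves to
        # reliable_q_retry_1; after 3 more failed receives it lands in
        # reliable_q_dlq. A rough local sketch, assuming default credentials
        # and the queue name above:
        #
        #   import boto3
        #   sqs = boto3.client("sqs")
        #   q_url = sqs.get_queue_url(QueueName="reliable_q")["QueueUrl"]
        #   resp = sqs.receive_message(QueueUrl=q_url, MaxNumberOfMessages=1)
        #   # Skipping delete_message() counts as a failed receive once the
        #   # queue's 10-second visibility timeout expires.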

        ########################################
        #######                          #######
        #######     SQS Data Producer    #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_sqs_producer_stack/lambda_src/sqs_data_producer.py",
                    encoding="utf-8",
                    mode="r") as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "sqsDataProducerFn",
            function_name=f"data_producer_fn_{construct_id}",
            description="Produce data events and push to SQS",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "RELIABLE_QUEUE_NAME": f"{self.reliable_q.queue_name}",
                "TRIGGER_RANDOM_FAILURES": "True"
            })

        # Grant our Lambda Producer privileges to write to SQS
        self.reliable_q.grant_send_messages(data_producer_fn)
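
        # A hypothetical sketch of the producer's core logic; the actual code
        # lives in lambda_src/sqs_data_producer.py (not shown here).
        #
        #   import json, os, random
        #   import boto3
        #
        #   def lambda_handler(event, context):
        #       queue = boto3.resource("sqs").get_queue_by_name(
        #           QueueName=os.environ["RELIABLE_QUEUE_NAME"])
        #       msg = {"order_id": random.randint(1, 10000), "status": "NEW"}
        #       # authorized by the grant_send_messages() call above
        #       queue.send_message(MessageBody=json.dumps(msg))
        #       return {"statusCode": 200}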

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "dataProducerLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict Producer Lambda to be invoked only from the stack owner account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToFhInOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=core.Aws.ACCOUNT_ID)

        # Monitoring for Queue
        reliable_q_alarm = _cw.Alarm(
            self,
            "reliableQueueAlarm",
            metric=self.reliable_q.metric(
                "ApproximateNumberOfMessagesVisible",
                statistic="sum",
                period=core.Duration.minutes(5)),
            threshold=10,
            evaluation_periods=1,
            comparison_operator=_cw.ComparisonOperator.GREATER_THAN_THRESHOLD)
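
        # The alarm above has no action attached. A minimal sketch, assuming an
        # SNS notification is wanted when it fires (imports shown inline here;
        # they would normally sit at the top of the file):
        from aws_cdk import aws_cloudwatch_actions as _cw_actions
        from aws_cdk import aws_sns as _sns

        reliable_q_alarm_topic = _sns.Topic(self, "reliableQueueAlarmTopic")
        reliable_q_alarm.add_alarm_action(
            _cw_actions.SnsAction(reliable_q_alarm_topic))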

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "SqsDataProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce data events and push to SQS Queue.")

        output_2 = core.CfnOutput(
            self,
            "ReliableQueue",
            value=
            f"https://console.aws.amazon.com/sqs/v2/home?region={core.Aws.REGION}#/queues",
            description="Reliable Queue")
Example No. 27
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level, vpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create EKS Cluster Role
        # https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html

        self._eks_cluster_svc_role = _iam.Role(
            self,
            "c_SvcRole",
            assumed_by=_iam.ServicePrincipal("eks.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSClusterPolicy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKS_CNI_Policy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSVPCResourceController")
            ])

        self._eks_node_role = _iam.Role(
            self,
            "c_NodeRole",
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSWorkerNodePolicy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKS_CNI_Policy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSSMManagedInstanceCore"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
                # Yes, yes...I know. :)
            ])

        c_admin_role = _iam.Role(
            self,
            "c_AdminRole",
            assumed_by=_iam.CompositePrincipal(
                _iam.AccountRootPrincipal(),
                _iam.ServicePrincipal("ec2.amazonaws.com")))
        c_admin_role.add_to_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 actions=["eks:DescribeCluster"],
                                 resources=["*"]))

        # Create Security Group for EKS Cluster SG
        self.eks_cluster_sg = _ec2.SecurityGroup(
            self,
            "eksClusterSG",
            vpc=vpc,
            description="EKS Cluster security group",
            allow_all_outbound=True,
        )
        cdk.Tags.of(self.eks_cluster_sg).add("Name", "eks_cluster_sg")

        # https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
        self.eks_cluster_sg.add_ingress_rule(
            peer=self.eks_cluster_sg,
            connection=_ec2.Port.all_traffic(),
            description="Allow incoming within SG")

        clust_name = "c_1_event_processor"

        self.eks_cluster_1 = _eks.Cluster(
            self,
            f"{clust_name}",
            cluster_name=f"{clust_name}",
            version=_eks.KubernetesVersion.V1_18,
            vpc=vpc,
            vpc_subnets=[
                _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
                _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PRIVATE)
            ],
            default_capacity=0,
            masters_role=c_admin_role,
            role=self._eks_cluster_svc_role,
            security_group=self.eks_cluster_sg,
            endpoint_access=_eks.EndpointAccess.PUBLIC
            # endpoint_access=_eks.EndpointAccess.PUBLIC_AND_PRIVATE
        )

        node_grp_1 = self.eks_cluster_1.add_nodegroup_capacity(
            f"n_g_{clust_name}",
            nodegroup_name=f"{clust_name}_n_g",
            instance_types=[
                _ec2.InstanceType("t3.medium"),
                _ec2.InstanceType("t3.large"),
            ],
            disk_size=20,
            min_size=1,
            max_size=6,
            desired_size=2,
            labels={
                "app": "miztiik_ng",
                "lifecycle": "on_demand",
                "compute_provider": "ec2"
            },
            subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
            ami_type=_eks.NodegroupAmiType.AL2_X86_64,
            # remote_access=_eks.NodegroupRemoteAccess(ssh_key_name="eks-ssh-keypair"),
            capacity_type=_eks.CapacityType.ON_DEMAND,
            node_role=self._eks_node_role
            # bootstrap_options={"kubelet_extra_args": "--node-labels=node.kubernetes.io/lifecycle=spot,daemonset=active,app=general --eviction-hard imagefs.available<15% --feature-gates=CSINodeInfo=true,CSIDriverRegistry=true,CSIBlockVolume=true,ExpandCSIVolumes=true"}
        )

        # This code block will provision worker nodes with Fargate Profile configuration
        fargate_n_g_3 = self.eks_cluster_1.add_fargate_profile(
            "FargateEnabled",
            fargate_profile_name="miztiik_n_g_fargate",
            selectors=[
                _eks.Selector(namespace="default",
                              labels={"fargate": "enabled"})
            ])
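
        # With the Fargate profile above selecting "fargate: enabled" pods in
        # the "default" namespace, a pod can be scheduled onto Fargate straight
        # from CDK. A minimal sketch; pod name and image are assumptions:
        self.eks_cluster_1.add_manifest(
            "helloFargatePod",
            {
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {
                    "name": "hello-pod",
                    "namespace": "default",
                    "labels": {"fargate": "enabled"}
                },
                "spec": {
                    "containers": [{
                        "name": "hello",
                        "image": "public.ecr.aws/nginx/nginx:latest"
                    }]
                }
            })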

        self.add_cluster_admin()
        # We like to use the Kubernetes Dashboard
        self.enable_dashboard()

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = cdk.CfnOutput(self,
                                 "eksClusterAdminRole",
                                 value=f"{c_admin_role.role_name}",
                                 description="EKS Cluster Admin Role")

        output_2 = cdk.CfnOutput(
            self,
            "eksClusterSvcRole",
            value=f"{self._eks_cluster_svc_role.role_name}",
            description="EKS Cluster Service Role")
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        vpc = _ec2.Vpc(self,
                       "ecs-vpc",
                       cidr="10.0.0.0/16",
                       nat_gateways=1,
                       max_azs=3)

        clusterAdmin = _iam.Role(self,
                                 "AdminRole",
                                 assumed_by=_iam.AccountRootPrincipal())

        cluster = _ecs.Cluster(self, "ecs-cluster", vpc=vpc)

        logging = _ecs.AwsLogDriver(stream_prefix="ecs-logs")

        taskRole = _iam.Role(
            self,
            f"ecs-taskRole-{self.stack_name}",
            role_name=f"ecs-taskRole-{self.stack_name}",
            assumed_by=_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        # ECS Constructs

        executionRolePolicy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=['*'],
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage",
                "logs:CreateLogStream", "logs:PutLogEvents"
            ])

        taskDef = _ecs.FargateTaskDefinition(self,
                                             "ecs-taskdef",
                                             task_role=taskRole)

        taskDef.add_to_execution_role_policy(executionRolePolicy)

        container = taskDef.add_container(
            'flask-app',
            image=_ecs.ContainerImage.from_registry(
                "nikunjv/flask-image:blue"),
            memory_limit_mib=256,
            cpu=256,
            logging=logging)

        container.add_port_mappings(
            _ecs.PortMapping(container_port=5000, protocol=_ecs.Protocol.TCP))

        fargateService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "ecs-service",
            cluster=cluster,
            task_definition=taskDef,
            public_load_balancer=True,
            desired_count=3,
            listener_port=80)

        scaling = fargateService.service.auto_scale_task_count(max_capacity=6)

        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=10,
            scale_in_cooldown=cdk.Duration.seconds(300),
            scale_out_cooldown=cdk.Duration.seconds(300))
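
        # CPU-based scaling can be paired with request-count scaling against
        # the ALB target group. A minimal sketch; the requests-per-target
        # threshold below is an assumption:
        scaling.scale_on_request_count(
            "RequestScaling",
            requests_per_target=500,
            target_group=fargateService.target_group,
            scale_in_cooldown=cdk.Duration.seconds(300),
            scale_out_cooldown=cdk.Duration.seconds(300))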

        # PIPELINE CONSTRUCTS

        # ECR Repo

        ecrRepo = ecr.Repository(self, "EcrRepo")

        gitHubSource = codebuild.Source.git_hub(
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(
                    codebuild.EventAction.PUSH).and_branch_is('main'),
            ])

        # CODEBUILD - project

        project = codebuild.Project(
            self,
            "ECSProject",
            project_name=cdk.Aws.STACK_NAME,
            source=gitHubSource,
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_2,
                privileged=True),
            environment_variables={
                "CLUSTER_NAME": {
                    'value': cluster.cluster_name
                },
                "ECR_REPO_URI": {
                    'value': ecrRepo.repository_uri
                }
            },
            build_spec=codebuild.BuildSpec.from_object({
                'version': "0.2",
                'phases': {
                    'pre_build': {
                        'commands': [
                            'env',
                            'export TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION}'
                        ]
                    },
                    'build': {
                        'commands': [
                            'cd docker-app',
                            'docker build -t $ECR_REPO_URI:$TAG .',
                            '$(aws ecr get-login --no-include-email)',
                            'docker push $ECR_REPO_URI:$TAG'
                        ]
                    },
                    'post_build': {
                        'commands': [
                            'echo "In Post-Build Stage"', 'cd ..',
                            "printf '[{\"name\":\"flask-app\",\"imageUri\":\"%s\"}]' $ECR_REPO_URI:$TAG > imagedefinitions.json",
                            "pwd; ls -al; cat imagedefinitions.json"
                        ]
                    }
                },
                'artifacts': {
                    'files': ['imagedefinitions.json']
                }
            }))

        # PIPELINE ACTIONS

        sourceOutput = codepipeline.Artifact()
        buildOutput = codepipeline.Artifact()

        sourceAction = codepipeline_actions.GitHubSourceAction(
            action_name='GitHub_Source',
            owner='samuelhailemariam',
            repo='aws-ecs-fargate-cicd-cdk',
            branch='main',  # keep in sync with the CodeBuild webhook filter above
            oauth_token=cdk.SecretValue.secrets_manager("/my/github/token"),
            output=sourceOutput)

        buildAction = codepipeline_actions.CodeBuildAction(
            action_name='codeBuild',
            project=project,
            input=sourceOutput,
            outputs=[buildOutput])

        manualApprovalAction = codepipeline_actions.ManualApprovalAction(
            action_name='Approve')

        deployAction = codepipeline_actions.EcsDeployAction(
            action_name='DeployAction',
            service=fargateService.service,
            image_file=codepipeline.ArtifactPath(buildOutput,
                                                 'imagedefinitions.json'))

        pipeline = codepipeline.Pipeline(self, "ECSPipeline")

        source_stage = pipeline.add_stage(stage_name="Source",
                                          actions=[sourceAction])

        build_stage = pipeline.add_stage(stage_name="Build",
                                         actions=[buildAction])

        approve_stage = pipeline.add_stage(stage_name="Approve",
                                           actions=[manualApprovalAction])

        deploy_stage = pipeline.add_stage(stage_name="Deploy-to-ECS",
                                          actions=[deployAction])

        ecrRepo.grant_pull_push(project.role)

        project.add_to_role_policy(
            _iam.PolicyStatement(resources=[cluster.cluster_arn],
                                 actions=[
                                     "ecs:DescribeCluster",
                                     "ecr:GetAuthorizationToken",
                                     "ecr:BatchCheckLayerAvailability",
                                     "ecr:BatchGetImage",
                                     "ecr:GetDownloadUrlForLayer"
                                 ]))

        # OUTPUT

        cdk.CfnOutput(
            self,
            "LoadBlancer-DNS",
            value=fargateService.load_balancer.load_balancer_dns_name)
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level: str,
        vpc,
        kafka_client_sg,
        kafka_topic_name: str,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        # Create AWS Kafka Layer
        kafka_layer = _lambda.LayerVersion(
            self,
            "kafkaLayer",
            code=_lambda.Code.from_asset(
                "stacks/back_end/serverless_kafka_producer_stack/lambda_src/layer_code/kafka_python3.zip"),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7,
                _lambda.Runtime.PYTHON_3_8
            ],
            license=f"Mystique Lambda Layer of kafka, Refer to AWS for license.",
            description="Layer to for latest version of kafka"
        )

        # Read Lambda Code
        try:
            with open(
                "stacks/back_end/serverless_kafka_producer_stack/lambda_src/stream_data_producer.py",
                encoding="utf-8",
                mode="r",
            ) as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "streamDataProducerFn",
            function_name=f"data_producer_{construct_id}",
            description="Produce streaming data events and push to Kafka Topic",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=cdk.Duration.seconds(10),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production",
                "TRIGGER_RANDOM_DELAY": "True",
                "STORE_EVENTS_TOPIC": f"{kafka_topic_name}",
                "KAFKA_BOOTSTRAP_SRV": "",
                "LD_LIBRARY_PATH": "/opt/python"
            },
            layers=[kafka_layer],
            security_groups=[kafka_client_sg],
            vpc=vpc,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PRIVATE)
        )
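
        # A hypothetical sketch of what stream_data_producer.py might do with
        # the kafka-python client shipped in the layer above:
        #
        #   import json, os
        #   from kafka import KafkaProducer
        #
        #   def lambda_handler(event, context):
        #       producer = KafkaProducer(
        #           bootstrap_servers=os.environ["KAFKA_BOOTSTRAP_SRV"].split(","),
        #           value_serializer=lambda v: json.dumps(v).encode("utf-8"))
        #       producer.send(os.environ["STORE_EVENTS_TOPIC"],
        #                     {"evnt_type": "sale_event"})
        #       producer.flush()
        #       return {"statusCode": 200}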

        # Grant our Lambda Producer privileges to write to S3
        # sales_event_bkt.grant_read_write(data_producer_fn)

        data_producer_fn_version = data_producer_fn.latest_version
        data_producer_fn_version_alias = _lambda.Alias(
            self,
            "streamDataProducerFnAlias",
            alias_name="MystiqueAutomation",
            version=data_producer_fn_version,
        )

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "streamDataProducerFnLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY,
        )

        # Restrict Produce Lambda to be invoked only from the stack owner account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=cdk.Aws.ACCOUNT_ID,
            # source_arn=sales_event_bkt.bucket_arn
        )

        # https://docs.aws.amazon.com/lambda/latest/dg/lambda-intro-execution-role.html
        roleStmt1 = _iam.PolicyStatement(
            sid="AllowLambdaToManageVPCENI",
            effect=_iam.Effect.ALLOW,
            resources=['*'],
            actions=['ec2:CreateNetworkInterface',
                     'ec2:DescribeNetworkInterfaces',
                     'ec2:DeleteNetworkInterface']
        )
        data_producer_fn.add_to_role_policy(roleStmt1)

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page.",
        )

        output_1 = cdk.CfnOutput(
            self,
            "SaleOrderEventsProducer",
            value=f"https://console.aws.amazon.com/lambda/home?region={cdk.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce streaming data events and push to S3 Topic.",
        )
Example No. 30
    def __init__(self, scope: core.Construct, id: str,
                 config: ContainerPipelineConfiguration, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC
        vpc = ec2.Vpc(
            scope=self,
            id="EksVPC",
            cidr="12.0.0.0/16",
            nat_gateways=1,
        )

        master_role = iam.Role(
            self,
            'cluster-master-role',
            assumed_by=iam.AccountRootPrincipal(),
        )

        # EKS on Fargate cluster
        cluster = eks.FargateCluster(
            scope=self,
            id='EksOnFargate',
            vpc=vpc,
            masters_role=master_role,
            version=eks.KubernetesVersion.V1_19,
            output_config_command=True,
            output_cluster_name=True,
            output_masters_role_arn=True,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
        )
        # EKS with managed nodes
        # cluster = eks.Cluster(scope=self, id='EksManagedNodes',
        #                       vpc=vpc,
        #                       masters_role=master_role,
        #                       version=eks.KubernetesVersion.V1_19,
        #                       output_config_command=True,
        #                       output_cluster_name=True,
        #                       output_masters_role_arn=True,
        #                       default_capacity=3,
        #                       default_capacity_instance=ec2.InstanceType.of(
        #                           ec2.InstanceClass.STANDARD5,
        #                           ec2.InstanceSize.LARGE
        #                       ),
        #                       endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
        #                       )

        cluster.node.add_dependency(vpc)

        # EKS master IAM roles
        # cloud9_master_role = iam.Role.from_role_arn(self, "Cloud9-fargate-role",
        #                                             role_arn=f"arn:aws:iam::{self.account}:role/Cloud9-fargate-role")
        # cluster.aws_auth.add_masters_role(cloud9_master_role)

        # grant access to final CodeBuild stage projects to do kubectl apply
        # eks_master_role_for_codedeploy: Role = iam.Role(
        #     self, "EksMasterRoleForCodeDeploy",
        #     role_name=PhysicalName.GENERATE_IF_NEEDED,
        #     assumed_by=iam.ServicePrincipal('codebuild.amazonaws.com'),
        # )
        #
        # codedeploy_role_policy_statement = iam.PolicyStatement(
        #     actions=["eks:DescribeFargateProfile",
        #              "eks:ListTagsForResource",
        #              "eks:AccessKubernetesApi",
        #              "eks:DescribeCluster"],
        #     resources=["*"]
        # )
        # eks_master_role_for_codedeploy.add_to_policy(codedeploy_role_policy_statement)
        # cluster.aws_auth.add_masters_role(eks_master_role_for_codedeploy)

        # setup logs
        log_group = logs.LogGroup(self,
                                  "log_group",
                                  log_group_name=config.ProjectName + "-eks-" +
                                  config.stage,
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  retention=None)
        log_group.add_stream(config.ProjectName + "-" + config.stage +
                             "-eks-stream")

        # Create a K8s service account for the AWS Load Balancer Controller on the EKS cluster.
        # The @aws_cdk/aws_eks module will also automatically create the corresponding IAM role, mapped via IRSA
        aws_lb_controller_service_account = cluster.add_service_account(
            "aws-load-balancer-controller",
            namespace="kube-system",
        )

        lb_acm_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'acm:DescribeCertificate',
                'acm:ListCertificates',
                'acm:GetCertificate',
            ],
            resources=['*'],
        )

        lb_ec2_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'ec2:AuthorizeSecurityGroupIngress',
                'ec2:CreateSecurityGroup',
                'ec2:CreateTags',
                'ec2:DeleteTags',
                'ec2:DeleteSecurityGroup',
                'ec2:DescribeAccountAttributes',
                'ec2:DescribeAddresses',
                'ec2:DescribeInstances',
                'ec2:DescribeInstanceStatus',
                'ec2:DescribeInternetGateways',
                'ec2:DescribeNetworkInterfaces',
                'ec2:DescribeSecurityGroups',
                'ec2:DescribeSubnets',
                'ec2:DescribeTags',
                'ec2:DescribeVpcs',
                'ec2:ModifyInstanceAttribute',
                'ec2:ModifyNetworkInterfaceAttribute',
                'ec2:RevokeSecurityGroupIngress',
            ],
            resources=['*'],
        )
        lb_elb_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'elasticloadbalancing:AddListenerCertificates',
                'elasticloadbalancing:AddTags',
                'elasticloadbalancing:CreateListener',
                'elasticloadbalancing:CreateLoadBalancer',
                'elasticloadbalancing:CreateRule',
                'elasticloadbalancing:CreateTargetGroup',
                'elasticloadbalancing:DeleteListener',
                'elasticloadbalancing:DeleteLoadBalancer',
                'elasticloadbalancing:DeleteRule',
                'elasticloadbalancing:DeleteTargetGroup',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:DescribeListenerCertificates',
                'elasticloadbalancing:DescribeListeners',
                'elasticloadbalancing:DescribeLoadBalancers',
                'elasticloadbalancing:DescribeLoadBalancerAttributes',
                'elasticloadbalancing:DescribeRules',
                'elasticloadbalancing:DescribeSSLPolicies',
                'elasticloadbalancing:DescribeTags',
                'elasticloadbalancing:DescribeTargetGroups',
                'elasticloadbalancing:DescribeTargetGroupAttributes',
                'elasticloadbalancing:DescribeTargetHealth',
                'elasticloadbalancing:ModifyListener',
                'elasticloadbalancing:ModifyLoadBalancerAttributes',
                'elasticloadbalancing:ModifyRule',
                'elasticloadbalancing:ModifyTargetGroup',
                'elasticloadbalancing:ModifyTargetGroupAttributes',
                'elasticloadbalancing:RegisterTargets',
                'elasticloadbalancing:RemoveListenerCertificates',
                'elasticloadbalancing:RemoveTags',
                'elasticloadbalancing:SetIpAddressType',
                'elasticloadbalancing:SetSecurityGroups',
                'elasticloadbalancing:SetSubnets',
                'elasticloadbalancing:SetWebAcl',
            ],
            resources=['*'],
        )

        lb_iam_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'iam:CreateServiceLinkedRole',
                'iam:GetServerCertificate',
                'iam:ListServerCertificates',
            ],
            resources=['*'],
        )

        lb_cognito_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['cognito-idp:DescribeUserPoolClient'],
            resources=['*'],
        )

        lb_waf_reg_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'waf-regional:GetWebACLForResource',
                'waf-regional:GetWebACL',
                'waf-regional:AssociateWebACL',
                'waf-regional:DisassociateWebACL',
            ],
            resources=['*'],
        )

        lb_tag_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['tag:GetResources', 'tag:TagResources'],
            resources=['*'],
        )

        lb_waf_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['waf:GetWebACL'],
            resources=['*'],
        )

        lb_wafv2_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'wafv2:GetWebACL',
                'wafv2:GetWebACLForResource',
                'wafv2:AssociateWebACL',
                'wafv2:DisassociateWebACL',
            ],
            resources=['*'],
        )

        lb_shield_policy_statements = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'shield:DescribeProtection',
                'shield:GetSubscriptionState',
                'shield:DeleteProtection',
                'shield:CreateProtection',
                'shield:DescribeSubscription',
                'shield:ListProtections',
            ],
            resources=['*'],
        )
        # Attach all of the above statements to the controller's service account
        for _stmnt in [
                lb_acm_policy_statements,
                lb_ec2_policy_statements,
                lb_elb_policy_statements,
                lb_iam_policy_statements,
                lb_cognito_policy_statements,
                lb_waf_reg_policy_statements,
                lb_tag_policy_statements,
                lb_waf_policy_statements,
                lb_wafv2_policy_statements,
                lb_shield_policy_statements,
        ]:
            aws_lb_controller_service_account.add_to_policy(_stmnt)

        # Deploy AWS LoadBalancer Controller from the Helm chart.
        # The chart expects camelCase value keys, and the service account
        # created above must be passed in by name so the chart does not create its own.
        lb_helm_values = {
            "clusterName": cluster.cluster_name,
            "region": self.region,
            "vpcId": cluster.vpc.vpc_id,
            "serviceAccount": {
                "create": False,
                "name": aws_lb_controller_service_account.service_account_name,
            },
        }

        helm_deploy = cluster.add_helm_chart(
            'aws-load-balancer-controller',
            chart="aws-load-balancer-controller",
            repository="https://aws.github.io/eks-charts",
            namespace="kube-system",
            values=lb_helm_values,
        )
        helm_deploy.node.add_dependency(cluster)
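
        # Once the controller is running, ALB provisioning is driven by Ingress
        # annotations. A hedged sketch of a matching Ingress added through the
        # same cluster; the backing service name and port are assumptions:
        demo_ingress = cluster.add_manifest(
            "demoIngress",
            {
                "apiVersion": "networking.k8s.io/v1",
                "kind": "Ingress",
                "metadata": {
                    "name": "demo-ingress",
                    "namespace": "default",
                    "annotations": {
                        "kubernetes.io/ingress.class": "alb",
                        "alb.ingress.kubernetes.io/scheme": "internet-facing",
                        "alb.ingress.kubernetes.io/target-type": "ip"
                    }
                },
                "spec": {
                    "rules": [{
                        "http": {
                            "paths": [{
                                "path": "/",
                                "pathType": "Prefix",
                                "backend": {
                                    "service": {
                                        "name": "demo-svc",
                                        "port": {"number": 80}
                                    }
                                }
                            }]
                        }
                    }]
                }
            })
        demo_ingress.node.add_dependency(helm_deploy)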