def add_helm_annotation(self, cluster, service_account):
        """
        workaround to add helm role to service account
        
        """

        return eks.KubernetesManifest(
            self,
            "ServiceAccountManifest",
            cluster=cluster,
            manifest=[{
                "apiVersion": "v1",
                "kind": "ServiceAccount",
                "metadata": {
                    "name": service_account.service_account_name,
                    "namespace": service_account.service_account_namespace,
                    "labels": {
                        "app.kubernetes.io/name":
                        service_account.service_account_name,
                        "app.kubernetes.io/managed-by": "Helm",
                    },
                    "annotations": {
                        "eks.amazonaws.com/role-arn":
                        service_account.role.role_arn,
                        "meta.helm.sh/release-name":
                        service_account.service_account_name,
                        "meta.helm.sh/release-namespace":
                        service_account.service_account_namespace,
                    },
                },
            }],
        )
    def add_service_account(self, cluster, name, namespace):
        """
        workaround to add helm role to service account
        
        """
        # create role
        conditions = core.CfnJson(
            self,
            'ConditionJson',
            value={
                "%s:aud" % cluster.cluster_open_id_connect_issuer:
                "sts.amazonaws.com",
                "%s:sub" % cluster.cluster_open_id_connect_issuer:
                "system:serviceaccount:%s:%s" % (namespace, name),
            },
        )
        principal = iam.OpenIdConnectPrincipal(
            cluster.open_id_connect_provider).with_conditions({
                "StringEquals":
                conditions,
            })
        role = iam.Role(self, 'ServiceAccountRole', assumed_by=principal)

        # create policy for the service account
        statements = []
        with open('backend/iam_policy.json') as f:
            data = json.load(f)
            for s in data["Statement"]:
                statements.append(iam.PolicyStatement.from_json(s))
        policy = iam.Policy(self, "LBControllerPolicy", statements=statements)
        policy.attach_to_role(role)

        return eks.KubernetesManifest(
            self,
            "ServiceAccount",
            cluster=cluster,
            manifest=[{
                "apiVersion": "v1",
                "kind": "ServiceAccount",
                "metadata": {
                    "name": name,
                    "namespace": namespace,
                    "labels": {
                        "app.kubernetes.io/name": name,
                        "app.kubernetes.io/managed-by": "Helm",
                    },
                    "annotations": {
                        "eks.amazonaws.com/role-arn": role.role_arn,
                        "meta.helm.sh/release-name": name,
                        "meta.helm.sh/release-namespace": namespace,
                    },
                },
            }],
        )
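A minimal usage sketch for the two helpers above (assumed, not taken from the original source; the service-account name and namespace are examples only):

# Illustrative usage, e.g. inside the same construct's __init__
# (cluster is assumed to be an eks.Cluster created elsewhere).
sa = cluster.add_service_account(
    "LBControllerSA",
    name="aws-load-balancer-controller",
    namespace="kube-system")
# Re-apply the ServiceAccount with Helm ownership metadata so a later
# Helm release can adopt it.
self.add_helm_annotation(cluster, sa)

# Or build the IRSA role plus annotated ServiceAccount from backend/iam_policy.json:
self.add_service_account(cluster, "aws-load-balancer-controller", "kube-system")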
Example #3
    def install_calico(self):
        # This produces an obnoxious diff on every subsequent run
        # Using a helm chart does not, so we should switch to that
        # However, we need to figure out how to get the helm chart
        # accessible by the CDK lambda first. Not clear how to give
        # s3 perms to it programmatically, and while ECR might be
        # an option it also doesn't seem like there's a way to push
        # the chart with existing api calls.
        # Probably need to do some custom lambda thing.
        for manifest in manifests:
            filename = f"{manifest[0]}.yaml"
            if isfile(filename):
                with open(filename) as f:
                    manifest_text = f.read()
            else:
                manifest_text = requests_get(manifest[1]).text
            loaded_manifests = [
                yaml_safe_load(i)
                for i in re_split("^---$", manifest_text, flags=MULTILINE) if i
            ]
            crds = eks.KubernetesManifest(
                self.scope,
                "calico-crds",
                cluster=self.eks_cluster,
                manifest=[
                    crd for crd in loaded_manifests
                    if crd["kind"] == "CustomResourceDefinition"
                ],
            )
            non_crds = eks.KubernetesManifest(
                self.scope,
                "calico",
                cluster=self.eks_cluster,
                manifest=[
                    notcrd for notcrd in loaded_manifests
                    if notcrd["kind"] != "CustomResourceDefinition"
                ],
            )
            non_crds.node.add_dependency(crds)
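The snippet above relies on several names that are not shown here (manifests, isfile, requests_get, yaml_safe_load, re_split, MULTILINE). A plausible set of supporting definitions, assuming each manifests entry is a (name, url) pair, might look like this (the URL is illustrative):

# Assumed supporting definitions for install_calico above (illustrative only):
from os.path import isfile
from re import MULTILINE, split as re_split

from requests import get as requests_get
from yaml import safe_load as yaml_safe_load

# (name, url) pairs; a local <name>.yaml takes precedence over downloading the URL.
manifests = [
    ("calico",
     "https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/master/config/master/calico.yaml"),
]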
    def __init__(self,
                 scope,
                 eks_base_cluster,
                 git_user,
                 git_password,
                 git_repository,
                 git_branch="master"):
        main_manifest = self._base_manifest(git_repository)
        main_manifest.append(self._secret_manifest(git_user, git_password))
        eks_manifest = eks.KubernetesManifest(scope=scope,
                                              id="fluxcd-main-manifest-new",
                                              cluster=eks_base_cluster,
                                              manifest=main_manifest)
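The FluxCD constructor above appends the result of self._secret_manifest to the manifest list, but that helper is not shown. A hedged sketch of what it might return, assuming the conventional Flux v1 git-auth Secret (the secret name, namespace, and data keys are assumptions):

    # Hypothetical sketch of the missing helper (not from the original source).
    def _secret_manifest(self, git_user, git_password):
        from base64 import b64encode

        def b64(value):
            return b64encode(value.encode("utf-8")).decode("utf-8")

        return {
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {"name": "flux-git-auth", "namespace": "flux"},
            "type": "Opaque",
            "data": {
                "GIT_AUTHUSER": b64(git_user),
                "GIT_AUTHKEY": b64(git_password),
            },
        }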
    def __init__(self, scope: core.Construct, id: str, cluster, manifest,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster
        self.manifest = manifest

        def backend_service(self, manifest):
            labels = manifest['labels']
            deployment = {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "replicas": manifest['replicas'],
                    "selector": {
                        "matchLabels": labels
                    },
                    "strategy": {
                        "rollingUpdate": {
                            "maxSurge": "25%",
                            "maxUnavailable": "25%"
                        }
                    },
                    "template": {
                        "metadata": {
                            "labels": labels
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                manifest['service_name'],
                                "image":
                                manifest['image'],
                                "ports": [{
                                    "containerPort": manifest['port'],
                                    "protocol": "TCP"
                                }]
                            }]
                        }
                    }
                }
            }

            service = {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "type": "LoadBalancer",
                    "ports": [{
                        "port": 80,
                        "targetPort": manifest['port']
                    }],
                    "selector": manifest['labels']
                }
            }

            return deployment, service

        def frontend_service(self, manifest):
            labels = manifest['labels']
            deployment = {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "replicas": manifest['replicas'],
                    "selector": {
                        "matchLabels": labels
                    },
                    "strategy": {
                        "rollingUpdate": {
                            "maxSurge": "25%",
                            "maxUnavailable": "25%"
                        }
                    },
                    "template": {
                        "metadata": {
                            "labels": labels
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                manifest['service_name'],
                                "image":
                                manifest['image'],
                                "ports": [{
                                    "containerPort": manifest['port'],
                                    "protocol": "TCP"
                                }],
                                "env":
                                manifest['env']
                            }],
                        }
                    }
                }
            }

            service = {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    #"type": "ClusterIP",
                    "ports": [{
                        "port": 80,
                        "targetPort": manifest['port'],
                        "protocol": "TCP"
                    }],
                    "selector":
                    manifest['labels']
                }
            }

            return deployment, service

        fargate_eks_ALB_ingress_manifest = {
            "apiVersion": "extensions/v1beta1",
            "kind": "Ingress",
            "metadata": {
                "name": "fargate-eks-alb-ingress",
                "namespace": "default",
                "annotations": {
                    "kubernetes.io/ingress.class": "alb",
                    "alb.ingress.kubernetes.io/scheme": "internet-facing",
                    "alb.ingress.kubernetes.io/target-type": "ip",
                    #"alb.ingress.kubernetes.io/listen-ports": "[{"HTTP": 80}, {"HTTP":5000}]"
                },
                "labels": manifest['labels'],
            },
            "spec": {
                "rules": [{
                    "http": {
                        "paths": [{
                            "path": "/*",
                            "backend": {
                                "serviceName": "flask",
                                "servicePort": 80
                            },
                        }],
                    },
                }],
            },
        }

        if self.manifest['service_type'] == 'backend':
            deployment_manifest, service_manifest = backend_service(
                self, self.manifest)
        elif self.manifest['service_type'] == 'frontend':
            deployment_manifest, service_manifest = frontend_service(
                self, self.manifest)

        aws_eks.KubernetesManifest(self,
                                   "DeploymentServiceManifest",
                                   cluster=self.cluster,
                                   manifest=[
                                       deployment_manifest, service_manifest,
                                       fargate_eks_ALB_ingress_manifest
                                   ])
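The construct above reads a fixed set of keys from the manifest dictionary it is given. A sketch of the input a caller might pass (key names come from the code above; the values are illustrative):

# Illustrative input for the construct above (values are made up).
flask_manifest = {
    "service_type": "backend",   # or "frontend"
    "service_name": "flask",
    "image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/flask:latest",
    "port": 5000,
    "replicas": 2,
    "labels": {"app": "flask"},
    "env": [{"name": "FLASK_ENV", "value": "production"}],  # used by frontend_service only
}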
Example #6
    def __init__(self, scope: core.Construct, id: str, cluster,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster

        iam_policy = aws_iam.PolicyStatement(
            actions=[
                "acm:DescribeCertificate", "acm:ListCertificates",
                "acm:GetCertificate", "ec2:AuthorizeSecurityGroupIngress",
                "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags",
                "ec2:DeleteSecurityGroup", "ec2:DescribeAccountAttributes",
                "ec2:DescribeAddresses", "ec2:DescribeInstances",
                "ec2:DescribeInstanceStatus", "ec2:DescribeInternetGateways",
                "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups",
                "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs",
                "ec2:ModifyInstanceAttribute",
                "ec2:ModifyNetworkInterfaceAttribute",
                "ec2:RevokeSecurityGroupIngress",
                "elasticloadbalancing:AddListenerCertificates",
                "elasticloadbalancing:AddTags",
                "elasticloadbalancing:CreateListener",
                "elasticloadbalancing:CreateLoadBalancer",
                "elasticloadbalancing:CreateRule",
                "elasticloadbalancing:CreateTargetGroup",
                "elasticloadbalancing:DeleteListener",
                "elasticloadbalancing:DeleteLoadBalancer",
                "elasticloadbalancing:DeleteRule",
                "elasticloadbalancing:DeleteTargetGroup",
                "elasticloadbalancing:DeregisterTargets",
                "elasticloadbalancing:DescribeListenerCertificates",
                "elasticloadbalancing:DescribeListeners",
                "elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeRules",
                "elasticloadbalancing:DescribeSSLPolicies",
                "elasticloadbalancing:DescribeTags",
                "elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTargetGroupAttributes",
                "elasticloadbalancing:DescribeTargetHealth",
                "elasticloadbalancing:ModifyListener",
                "elasticloadbalancing:ModifyLoadBalancerAttributes",
                "elasticloadbalancing:ModifyRule",
                "elasticloadbalancing:ModifyTargetGroup",
                "elasticloadbalancing:ModifyTargetGroupAttributes",
                "elasticloadbalancing:RegisterTargets",
                "elasticloadbalancing:RemoveListenerCertificates",
                "elasticloadbalancing:RemoveTags",
                "elasticloadbalancing:SetIpAddressType",
                "elasticloadbalancing:SetSecurityGroups",
                "elasticloadbalancing:SetSubnets",
                "elasticloadbalancing:SetWebAcl",
                "iam:CreateServiceLinkedRole", "iam:GetServerCertificate",
                "iam:ListServerCertificates",
                "cognito-idp:DescribeUserPoolClient",
                "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL",
                "waf-regional:AssociateWebACL",
                "waf-regional:DisassociateWebACL", "tag:GetResources",
                "tag:TagResources", "waf:GetWebACL", "wafv2:GetWebACL",
                "wafv2:GetWebACLForResource", "wafv2:AssociateWebACL",
                "wafv2:DisassociateWebACL", "shield:DescribeProtection",
                "shield:GetSubscriptionState", "shield:DeleteProtection",
                "shield:CreateProtection", "shield:DescribeSubscription",
                "shield:ListProtections"
            ],
            resources=['*'],
        )

        eks_role = aws_iam.Role(
            self,
            "HandsOnClusterServiceAccountRole",
            assumed_by=aws_iam.ServicePrincipal('eks.amazonaws.com'),
        )

        eks_role.add_to_policy(iam_policy)

        cluster_role_manifest = {
            "apiVersion":
            "rbac.authorization.k8s.io/v1",
            "kind":
            "ClusterRole",
            "metadata": {
                "name": "alb-ingress-controller",
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                }
            },
            "rules": [{
                "apiGroups": ["", "extensions"],
                "resources": [
                    "configmaps", "endpoints", "events", "ingresses",
                    "ingresses/status", "services", "pods/status", "nodes",
                    "pods", "secrets", "services", "namespaces"
                ],
                "verbs": ["create", "get", "list", "update", "watch", "patch"]
            }]
        }

        cluster_role_binding_manifest = {
            "apiVersion":
            "rbac.authorization.k8s.io/v1",
            "kind":
            "ClusterRoleBinding",
            "metadata": {
                "name": "alb-ingress-controller",
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                }
            },
            "roleRef": {
                "apiGroup": "rbac.authorization.k8s.io",
                "kind": "ClusterRole",
                "name": "alb-ingress-controller"
            },
            "subjects": [{
                "kind": "ServiceAccount",
                "name": "alb-ingress-controller",
                "namespace": "kube-system"
            }]
        }

        service_account_manifest = {
            "apiVersion": "v1",
            "kind": "ServiceAccount",
            "metadata": {
                "name": "alb-ingress-controller",
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                },
                "namespace": "kube-system",
                "annotations": {
                    "eks.amazonaws.com/role-arn": eks_role.role_arn
                }
            }
        }

        deployment_manifest = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": "alb-ingress-controller",
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                },
                "namespace": "kube-system"
            },
            "spec": {
                "selector": {
                    "matchLabels": {
                        "app.kubernetes.io/name": "alb-ingress-controller"
                    }
                },
                "template": {
                    "metadata": {
                        "labels": {
                            "app.kubernetes.io/name": "alb-ingress-controller"
                        }
                    },
                    "spec": {
                        "containers": [{
                            "name":
                            "alb-ingress-controller",
                            "args": [
                                "--ingress-class=alb",
                                "--cluster-name={}".format(
                                    self.cluster.cluster_name),
                                "--aws-vpc-id={}".format(
                                    self.cluster.vpc.vpc_id),
                                "--aws-region={}".format(
                                    self.cluster.vpc.env.region),
                                "--aws-api-debug"
                            ],
                            "image":
                            "docker.io/amazon/aws-alb-ingress-controller:v1.1.8",
                        }],
                        "serviceAccountName":
                        "alb-ingress-controller"
                    }
                }
            }
        }

        service_acct = aws_eks.ServiceAccount(self,
                                              "alb-ingress-controller",
                                              cluster=self.cluster,
                                              name="alb-ingress-controller",
                                              namespace="kube-system")

        service_acct.add_to_principal_policy(statement=iam_policy)

        alb_ingress_access_manifests = aws_eks.KubernetesManifest(
            self,
            "ClusterRoleALB",
            cluster=self.cluster,
            manifest=[
                cluster_role_manifest, cluster_role_binding_manifest,
                service_account_manifest
            ])

        alb_ingress_deployment = aws_eks.KubernetesManifest(
            self,
            "ALBIngressDeployment",
            cluster=self.cluster,
            manifest=[deployment_manifest])
	def __init__(self, scope: core.Construct, id: str, elastic: Elastic, vpc: ec2.Vpc, roles: list, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		sm_policy = iam.PolicyStatement(
			actions=["secretsmanager:GetSecretValue"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.secret.secret_arn]
		)

		es_policy = iam.PolicyStatement(
			actions=["es:DescribeElasticsearchDomain"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.domain.domain_arn]
		)

		function = lbd.SingletonFunction(
			self,
			"ElasticsearchConfigFunction",
			uuid="e579d5f9-1709-43ea-b75f-9d1452ca7690",
			code=lbd.Code.from_asset(
				"custom_resources/elasticsearch/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="elasticsearchConfig",
			initial_policy=[sm_policy,es_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			security_group=ec2.SecurityGroup.from_security_group_id(self, "lambdaVPC", vpc.vpc_default_security_group),
			timeout=core.Duration.seconds(30),
			vpc=vpc,
			vpc_subnets=ec2.SubnetSelection(
				one_per_az=True
			)
		)

		provider = cr.Provider(
			self, "ElasticsearchConfigProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)

		core.CustomResource(
			self, "ElasticSearchConfig", 
			service_token=provider.service_token,
			properties={
				"domain": elastic.domain.domain_name,
				"secret": elastic.secret.secret_arn,
				"roles": [role.role_arn for role in roles],
				"shards": self.node.try_get_context("elastic")['shards'],
				"user": boto3.client('sts').get_caller_identity().get('Arn'),
				"replicas": self.node.try_get_context("elastic")['replicas']
			}
		)

		manifests = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']:
			manifests.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "elasticsearch",
					"namespace": namespace
				},
				"data": {
					"url": elastic.domain.domain_endpoint
				}
			})
		eks.KubernetesManifest(
			self, 
			"elastic-search-cm", 
			cluster=cluster,
			manifest=manifests
		)		
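The Elasticsearch configuration above pulls the shard/replica counts and the namespace list from the CDK context. A sketch of the cdk.json context shape it assumes, expressed as the Python value try_get_context would return (values are illustrative):

# Context assumed by the snippet above, e.g. under "context" in cdk.json
# (illustrative values).
assumed_context = {
    "kubernetes": {"namespaces": ["default", "monitoring"]},
    "elastic": {"shards": 2, "replicas": 1},
}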
Example #8
    def __init__(self, scope: core.Construct, id: str, VPC: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster_admin = iam.Role(self,
                                 "AdminRole",
                                 assumed_by=iam.AccountRootPrincipal())

        self.cluster = eks.Cluster(self,
                                   "cluster",
                                   default_capacity=self.node.try_get_context(
                                       "kubernetes")["default_capacity"],
                                   default_capacity_instance=ec2.InstanceType(
                                       self.node.try_get_context("kubernetes")
                                       ["default_capacity_instance"]),
                                   cluster_name="statement-demo",
                                   vpc=VPC,
                                   vpc_subnets=VPC.private_subnets,
                                   masters_role=cluster_admin,
                                   version=eks.KubernetesVersion.V1_17,
                                   endpoint_access=eks.EndpointAccess.PRIVATE)

        vpc_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "sgVPC", VPC.vpc_default_security_group)
        eks_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "sgEKS", self.cluster.cluster_security_group_id)

        vpc_security_group.add_ingress_rule(eks_security_group,
                                            ec2.Port.all_traffic())

        eks_security_group.add_ingress_rule(vpc_security_group,
                                            ec2.Port.all_traffic())

        self.cluster.default_nodegroup.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchAgentServerPolicy"))

        #see https://github.com/kubernetes/kubernetes/issues/61486?#issuecomment-635169272
        eks.KubernetesPatch(
            self,
            "patch",
            cluster=self.cluster,
            resource_name="daemonset/kube-proxy",
            resource_namespace="kube-system",
            apply_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy",
                                    "--v=2",
                                    "--hostname-override=$(NODE_NAME)",
                                    "--config=/var/lib/kube-proxy-config/config",
                                ],
                                "env": [{
                                    "name": "NODE_NAME",
                                    "valueFrom": {
                                        "fieldRef": {
                                            "apiVersion": "v1",
                                            "fieldPath": "spec.nodeName"
                                        }
                                    }
                                }]
                            }]
                        }
                    }
                }
            },
            restore_patch={
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [{
                                "name":
                                "kube-proxy",
                                "command": [
                                    "kube-proxy", "--v=2",
                                    "--config=/var/lib/kube-proxy-config/config"
                                ]
                            }]
                        }
                    }
                }
            })

        # Elasticsearch clusters have many nodes, and their DNS records are always truncated by OpenDNS
        eks.KubernetesPatch(
            self,
            "coreDNSTCP",
            cluster=self.cluster,
            resource_name="configmap/coredns",
            resource_namespace="kube-system",
            apply_patch={
                "data": {
                    "Corefile":
                    ".:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n      pods insecure\n      upstream\n      fallthrough in-addr.arpa ip6.arpa\n    }\n    prometheus :9153\n    forward . /etc/resolv.conf {\n      force_tcp\n    }\n    cache 30\n    loop\n    reload\n    loadbalance\n}\n"
                }
            },
            restore_patch={
                "data": {
                    "Corefile":
                    ".:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n      pods insecure\n      upstream\n      fallthrough in-addr.arpa ip6.arpa\n    }\n    prometheus :9153\n    forward . /etc/resolv.conf\n    cache 30\n    loop\n    reload\n    loadbalance\n}\n"
                }
            })

        # adding myself as a cluster admin
        self.cluster.aws_auth.add_user_mapping(iam.User.from_user_name(
            self, "me",
            boto3.client('sts').get_caller_identity().get('Arn').partition('/')
            [2]),
                                               groups=["system:masters"])

        text = requests.get(
            "https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/quickstart/cwagent-fluentd-quickstart.yaml"
        ).text.replace("{{cluster_name}}", self.cluster.cluster_name).replace(
            "{{region_name}}",
            core.Stack.of(self).region)
        eks.KubernetesManifest(
            self,
            "containerInsights",
            cluster=self.cluster,
            manifest=[yaml.safe_load(item) for item in text.split("---\n")])
    def __init__(self, scope: core.Construct, id: str, cluster, manifest,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster
        self.manifest = manifest

        def backend_service(self, manifest):
            labels = manifest['labels']
            deployment = {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "replicas": manifest['replicas'],
                    "selector": {
                        "matchLabels": labels
                    },
                    "strategy": {
                        "rollingUpdate": {
                            "maxSurge": "25%",
                            "maxUnavailable": "25%"
                        }
                    },
                    "template": {
                        "metadata": {
                            "labels": labels
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                manifest['service_name'],
                                "image":
                                manifest['image'],
                                "ports": [{
                                    "containerPort": manifest['port'],
                                    "protocol": "TCP"
                                }]
                            }]
                        }
                    }
                }
            }

            service = {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "type": "LoadBalancer",
                    "ports": [{
                        "port": 80,
                        "targetPort": manifest['port']
                    }],
                    "selector": manifest['labels']
                }
            }

            return deployment, service

        def frontend_service(self, manifest):
            labels = manifest['labels']
            deployment = {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "replicas": manifest['replicas'],
                    "selector": {
                        "matchLabels": labels
                    },
                    "strategy": {
                        "rollingUpdate": {
                            "maxSurge": "25%",
                            "maxUnavailable": "25%"
                        }
                    },
                    "template": {
                        "metadata": {
                            "labels": labels
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                manifest['service_name'],
                                "image":
                                manifest['image'],
                                "ports": [{
                                    "containerPort": manifest['port'],
                                    "protocol": "TCP"
                                }],
                                "env":
                                manifest['env']
                            }],
                        }
                    }
                }
            }

            service = {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": manifest['service_name'],
                    "namespace": "default"
                },
                "spec": {
                    "type": "LoadBalancer",
                    "ports": [{
                        "port": 80,
                        "targetPort": manifest['port']
                    }],
                    "selector": manifest['labels']
                }
            }

            return deployment, service

        if self.manifest['service_type'] == 'backend':
            deployment_manifest, service_manifest = backend_service(
                self, self.manifest)
        elif self.manifest['service_type'] == 'frontend':
            deployment_manifest, service_manifest = frontend_service(
                self, self.manifest)

        aws_eks.KubernetesManifest(
            self,
            "NodeJSManifest",
            cluster=self.cluster,
            manifest=[deployment_manifest, service_manifest])
    def __init__(self, scope: core.Construct, id: str, eksname: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Cloudformation input params
        datalake_bucket = core.CfnParameter(
            self,
            "datalakebucket",
            type="String",
            description=
            "You existing S3 bucket to be accessed by Jupyter Notebook and ETL job. Default: blank",
            default="")
        login_name = core.CfnParameter(
            self,
            "jhubuser",
            type="String",
            description="Your username login to jupyter hub",
            default="sparkoneks")

        # Auto-generate a user login in secrets manager
        jhub_secret = secmger.Secret(
            self,
            'jHubPwd',
            generate_secret_string=secmger.SecretStringGenerator(
                exclude_punctuation=True,
                secret_string_template=json.dumps(
                    {'username': login_name.value_as_string}),
                generate_string_key="password"))

        # A new bucket to store app code and access logs
        self.app_s3 = S3AppCodeConst(self, 'appcode')

        # 1. Setup EKS base infrastructure
        network_sg = NetworkSgConst(self, 'network-sg', eksname,
                                    self.app_s3.code_bucket)
        iam = IamConst(self, 'iam_roles', eksname)
        eks_cluster = EksConst(self, 'eks_cluster', eksname, network_sg.vpc,
                               iam.managed_node_role, iam.admin_role,
                               self.region)
        eks_security = EksSAConst(self, 'eks_sa', eks_cluster.my_cluster,
                                  jhub_secret)
        eks_base_app = EksBaseAppConst(self, 'eks_base_app',
                                       eks_cluster.my_cluster, self.region)

        # 2. Setup Spark application access control
        app_security = SparkOnEksSAConst(self, 'spark_service_account',
                                         eks_cluster.my_cluster,
                                         login_name.value_as_string,
                                         self.app_s3.code_bucket,
                                         datalake_bucket.value_as_string)

        # 3. Install ETL orchestrator - Argo
        # can be replaced by another workflow tool, e.g. Airflow
        argo_install = eks_cluster.my_cluster.add_helm_chart(
            'ARGOChart',
            chart='argo',
            repository='https://argoproj.github.io/argo-helm',
            release='argo',
            namespace='argo',
            create_namespace=True,
            values=loadYamlLocal('../app_resources/argo-values.yaml'))
        # Create a Spark workflow template with different T-shirt sizes
        submit_tmpl = eks_cluster.my_cluster.add_manifest(
            'SubmitSparkWrktmpl',
            loadYamlLocal('../app_resources/spark-template.yaml'))
        submit_tmpl.node.add_dependency(argo_install)

        # 4. Install Arc Jupyter notebook as the Spark ETL IDE
        jhub_install = eks_cluster.my_cluster.add_helm_chart(
            'JHubChart',
            chart='jupyterhub',
            repository='https://jupyterhub.github.io/helm-chart',
            release='jhub',
            version='0.11.1',
            namespace='jupyter',
            create_namespace=False,
            values=loadYamlReplaceVarLocal(
                '../app_resources/jupyter-values.yaml',
                fields={
                    "{{codeBucket}}": self.app_s3.code_bucket,
                    "{{region}}": self.region
                }))

        # get Arc Jupyter login from secrets manager
        name_parts = core.Fn.split('-', jhub_secret.secret_name)
        name_no_suffix = core.Fn.join(
            '-',
            [core.Fn.select(0, name_parts),
             core.Fn.select(1, name_parts)])

        config_hub = eks.KubernetesManifest(
            self,
            'JHubConfig',
            cluster=eks_cluster.my_cluster,
            manifest=loadYamlReplaceVarLocal(
                '../app_resources/jupyter-config.yaml',
                fields={
                    "{{MY_SA}}": app_security.jupyter_sa,
                    "{{REGION}}": self.region,
                    "{{SECRET_NAME}}": name_no_suffix
                },
                multi_resource=True))
        config_hub.node.add_dependency(jhub_install)

        # 5. (OPTIONAL) retrieve the ALB DNS name to enable CloudFront in the following nested stack.
        # We recommend removing this section and the rest of the CloudFront components.
        # Set up your own certificate and attach it to the ALB to enable HTTPS.
        self._argo_alb = eks.KubernetesObjectValue(
            self,
            'argoALB',
            cluster=eks_cluster.my_cluster,
            json_path='.status.loadBalancer.ingress[0].hostname',
            object_type='ingress',
            object_name='argo-server',
            object_namespace='argo')
        self._argo_alb.node.add_dependency(argo_install)

        self._jhub_alb = eks.KubernetesObjectValue(
            self,
            'jhubALB',
            cluster=eks_cluster.my_cluster,
            json_path='.status.loadBalancer.ingress[0].hostname',
            object_type='ingress',
            object_name='jupyterhub',
            object_namespace='jupyter')
        self._jhub_alb.node.add_dependency(config_hub)
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level: str,
        eks_cluster,
        sales_event_bkt,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        self.reliable_q = _sqs.Queue(
            self,
            "reliableQueue01",
            delivery_delay=cdk.Duration.seconds(2),
            queue_name=f"reliable_message_q",
            retention_period=cdk.Duration.days(2),
            visibility_timeout=cdk.Duration.seconds(30)
        )

        # Grant our EKS Node Producer privileges to write to SQS
        # Due to a cyclic dependency, this should be done before the EKS cluster is created
        # self.reliable_q.grant_send_messages(_eks_node_role)

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        app_grp_name = "sales-events-producer"
        app_grp_label = {"app": f"{app_grp_name}"}

        app_grp_ns = eks_cluster.add_manifest(
            f"{app_grp_name}-ns-01",
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                        "name": f"{app_grp_name}-ns",
                        "labels": {
                            "name": f"{app_grp_name}-ns"
                        }
                }
            }
        )

        ####### APP 01 #######

        app_01_producer_deployment = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": f"{app_grp_name}",
                "namespace": f"{app_grp_name}-ns"
            },
            "spec": {
                "replicas": 1,
                "selector": {"matchLabels": app_grp_label},
                "template": {
                    "metadata": {"labels": app_grp_label},
                    "spec": {
                        "containers": [
                            {
                                "name": f"{app_grp_name}",
                                "image": "python:3.8.10-alpine",
                                "command": [
                                    "sh",
                                    "-c"
                                ],
                                "args": [
                                    "wget https://raw.githubusercontent.com/miztiik/event-processor-on-eks/master/stacks/back_end/eks_sqs_producer_stack/lambda_src/stream_data_producer.py;pip3 install --user boto3;python3 stream_data_producer.py;"
                                ],
                                "env": [
                                    {
                                        "name": "STORE_EVENTS_BKT",
                                        "value": f"{sales_event_bkt.bucket_name}"
                                    },
                                    {
                                        "name": "S3_PREFIX",
                                        "value": "sales_events"
                                    },
                                    {
                                        "name": "RELIABLE_QUEUE_NAME",
                                        "value": f"{self.reliable_q.queue_name}"
                                    },
                                    {
                                        "name": "AWS_REGION",
                                        "value": f"{cdk.Aws.REGION}"
                                    },
                                    {
                                        "name": "TOT_MSGS_TO_PRODUCE",
                                        "value": "10000"
                                    },
                                    {
                                        "name": "WAIT_SECS_BETWEEN_MSGS",
                                        "value": "2"
                                    }
                                ]
                            }
                        ]
                    }
                }
            }
        }

        # apply a kubernetes manifest to the cluster
        app_01_manifest = _eks.KubernetesManifest(
            self,
            "miztSalesEventproducerSvc",
            cluster=eks_cluster,
            manifest=[
                app_01_producer_deployment,
            ]
        )

        app_01_manifest.node.add_dependency(
            app_grp_ns)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page.",
        )

        output_1 = cdk.CfnOutput(
            self,
            "ReliableMessageQueue",
            value=f"https://console.aws.amazon.com/sqs/v2/home?region={cdk.Aws.REGION}#/queues",
            description="Reliable Message Queue"
        )
Example #12
    def __init__(self, scope: core.Construct, id: str, eks_cluster, service,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.eks_cluster = eks_cluster
        self.service = service

        def my_demo_service(self, service):
            labels = service['labels']

            deployment = {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "name": service['service_name'],
                    "namespace": service['namespace']
                },
                "spec": {
                    "replicas": service['replicas'],
                    "selector": {
                        "matchLabels": labels
                    },
                    "template": {
                        "metadata": {
                            "labels": labels
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                service['service_name'],
                                "image":
                                service['image'],
                                "ports": [{
                                    "containerPort": service['port'],
                                    "protocol": "TCP"
                                }]
                            }],
                        }
                    }
                }
            }

            service = {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": service['service_name'],
                    "namespace": service['namespace']
                },
                "spec": {
                    "type": "LoadBalancer",
                    "ports": [{
                        "port": 80,
                        "targetPort": service['port']
                    }],
                    "selector": service['labels']
                }
            }

            namespace = {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                    "name": "eksdemo",
                    "labels": {
                        "name": "eksdemo"
                    }
                }
            }

            return deployment, service, namespace

        deployment_manifest, service_manifest, namespace_manifest = my_demo_service(
            self, self.service)

        eks.KubernetesManifest(self,
                               "MySampleService-",
                               cluster=self.eks_cluster,
                               manifest=[
                                   namespace_manifest, deployment_manifest,
                                   service_manifest
                               ])
Example #13
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level: str,
        eks_cluster,
        reliable_q,
        sales_event_bkt,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        ########################################
        #######                          #######
        #######   Stream Data consumer   #######
        #######                          #######
        ########################################

        app_grp_name = "sales-events-consumer"
        app_grp_label = {"app": f"{app_grp_name}"}

        app_grp_ns = eks_cluster.add_manifest(
            f"{app_grp_name}-ns-01",
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {
                        "name": f"{app_grp_name}-ns",
                        "labels": {
                            "name": f"{app_grp_name}-ns"
                        }
                }
            }
        )

        app_01_consumer_deployment = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": f"{app_grp_name}",
                "namespace": f"{app_grp_name}-ns"
            },
            "spec": {
                "replicas": 1,
                "selector": {"matchLabels": app_grp_label},
                "template": {
                    "metadata": {"labels": app_grp_label},
                    "spec": {
                        "containers": [
                            {
                                "name": f"{app_grp_name}",
                                "image": "python:3.8.10-alpine",
                                "command": [
                                    "sh",
                                    "-c"
                                ],
                                "args": [
                                    "wget https://raw.githubusercontent.com/miztiik/event-processor-on-eks/master/stacks/back_end/eks_sqs_consumer_stack/lambda_src/stream_data_consumer.py;pip3 install --user boto3;python3 stream_data_consumer.py;"
                                ],
                                "env":
                                [
                                    {
                                        "name": "STORE_EVENTS_BKT",
                                        "value": f"{sales_event_bkt.bucket_name}"
                                    },
                                    {
                                        "name": "S3_PREFIX",
                                        "value": "sales_events"
                                    },
                                    {
                                        "name": "RELIABLE_QUEUE_NAME",
                                        "value": f"{reliable_q.queue_name}"
                                    },
                                    {
                                        "name": "AWS_REGION",
                                        "value": f"{cdk.Aws.REGION}"
                                    },
                                    {
                                        "name": "MAX_MSGS_PER_BATCH",
                                        "value": "5"
                                    },
                                    {
                                        "name": "MSG_POLL_BACKOFF",
                                        "value": "2"
                                    },
                                    {
                                        "name": "MSG_PROCESS_DELAY",
                                        "value": "10"
                                    },
                                    {
                                        "name": "TOT_MSGS_TO_PROCESS",
                                        "value": "10000"
                                    }
                                ]
                            }
                        ]
                    }
                }
            }
        }

        # apply a kubernetes manifest to the cluster
        app_01_manifest = _eks.KubernetesManifest(
            self,
            "miztSalesEventConsumerSvc",
            cluster=eks_cluster,
            manifest=[
                app_01_consumer_deployment
            ]
        )

        app_01_manifest.node.add_dependency(
            app_grp_ns)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page.",
        )
    def __init__(self, scope: core.Construct, id: str, eksname: str,
                 solution_id: str, version: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.template_options.description = "(SO0141) SQL based ETL with Apache Spark on Amazon EKS. This solution provides a SQL based ETL option with an open-source declarative framework powered by Apache Spark."
        source_dir = os.path.split(os.environ['VIRTUAL_ENV'])[0] + '/source'

        # Cloudformation input params
        datalake_bucket = core.CfnParameter(
            self,
            "datalakebucket",
            type="String",
            description=
            "Your existing S3 bucket to be accessed by Jupyter Notebook and ETL job. Default: blank",
            default="")
        login_name = core.CfnParameter(
            self,
            "jhubuser",
            type="String",
            description="Your username login to jupyter hub",
            default="sparkoneks")

        # Auto-generate a user login in secrets manager
        key = kms.Key(self,
                      'KMSKey',
                      removal_policy=core.RemovalPolicy.DESTROY,
                      enable_key_rotation=True)
        key.add_alias("alias/secretsManager")
        jhub_secret = secmger.Secret(
            self,
            'jHubPwd',
            generate_secret_string=secmger.SecretStringGenerator(
                exclude_punctuation=True,
                secret_string_template=json.dumps(
                    {'username': login_name.value_as_string}),
                generate_string_key="password"),
            removal_policy=core.RemovalPolicy.DESTROY,
            encryption_key=key)

        # 1. a new bucket to store app code and logs
        self.app_s3 = S3AppCodeConst(self, 'appcode')

        # 2. push docker image to ECR via AWS CICD pipeline
        ecr_image = DockerPipelineConstruct(self, 'image',
                                            self.app_s3.artifact_bucket)
        ecr_image.node.add_dependency(self.app_s3)
        core.CfnOutput(self, 'IMAGE_URI', value=ecr_image.image_uri)

        # 3. EKS base infrastructure
        network_sg = NetworkSgConst(self, 'network-sg', eksname,
                                    self.app_s3.code_bucket)
        iam = IamConst(self, 'iam_roles', eksname)
        eks_cluster = EksConst(self, 'eks_cluster', eksname, network_sg.vpc,
                               iam.managed_node_role, iam.admin_role)
        EksSAConst(self, 'eks_sa', eks_cluster.my_cluster, jhub_secret)
        base_app = EksBaseAppConst(self, 'eks_base_app',
                                   eks_cluster.my_cluster)

        # 4. Spark app access control
        app_security = SparkOnEksSAConst(self, 'spark_service_account',
                                         eks_cluster.my_cluster,
                                         login_name.value_as_string,
                                         self.app_s3.code_bucket,
                                         datalake_bucket.value_as_string)
        app_security.node.add_dependency(base_app.secret_created)
        # 5. Install Arc Jupyter notebook in EKS
        jhub_install = eks_cluster.my_cluster.add_helm_chart(
            'JHubChart',
            chart='jupyterhub',
            repository='https://jupyterhub.github.io/helm-chart',
            release='jhub',
            version='0.11.1',
            namespace='jupyter',
            create_namespace=False,
            values=load_yaml_replace_var_local(
                source_dir + '/app_resources/jupyter-values.yaml',
                fields={
                    "{{codeBucket}}": self.app_s3.code_bucket,
                    "{{region}}": core.Aws.REGION
                }))
        jhub_install.node.add_dependency(app_security)
        # EKS gets the Jupyter login dynamically from Secrets Manager
        name_parts = core.Fn.split('-', jhub_secret.secret_name)
        name_no_suffix = core.Fn.join(
            '-',
            [core.Fn.select(0, name_parts),
             core.Fn.select(1, name_parts)])

        config_hub = eks.KubernetesManifest(
            self,
            'JHubConfig',
            cluster=eks_cluster.my_cluster,
            manifest=load_yaml_replace_var_local(
                source_dir + '/app_resources/jupyter-config.yaml',
                fields={
                    "{{MY_SA}}": app_security.jupyter_sa,
                    "{{REGION}}": core.Aws.REGION,
                    "{{SECRET_NAME}}": name_no_suffix
                },
                multi_resource=True))
        config_hub.node.add_dependency(jhub_install)

        # 6. Install ETL orchestrator - Argo in EKS
        # can be replaced by another workflow tool, e.g. Airflow
        argo_install = eks_cluster.my_cluster.add_helm_chart(
            'ARGOChart',
            chart='argo-workflows',
            repository='https://argoproj.github.io/argo-helm',
            release='argo',
            version='0.1.4',
            namespace='argo',
            create_namespace=True,
            values=load_yaml_local(source_dir +
                                   '/app_resources/argo-values.yaml'))
        argo_install.node.add_dependency(config_hub)
        # Create an Argo workflow template for Spark with T-shirt sizing
        submit_tmpl = eks_cluster.my_cluster.add_manifest(
            'SubmitSparkWrktmpl',
            load_yaml_local(source_dir + '/app_resources/spark-template.yaml'))
        submit_tmpl.node.add_dependency(argo_install)

        # 7. (OPTIONAL) Retrieve the ALB DNS names to enable CloudFront in the nested stack.
        # CloudFront serves HTTPS requests with its default domain name.
        # It is recommended to issue your own TLS certificate and delete the CloudFront components.
        self._jhub_alb = eks.KubernetesObjectValue(
            self,
            'jhubALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='jupyterhub',
            object_namespace='jupyter',
            timeout=core.Duration.minutes(10))
        self._jhub_alb.node.add_dependency(config_hub)

        self._argo_alb = eks.KubernetesObjectValue(
            self,
            'argoALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='argo-argo-workflows-server',
            object_namespace='argo',
            timeout=core.Duration.minutes(10))
        self._argo_alb.node.add_dependency(argo_install)
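        # The resolved values (self._jhub_alb.value, self._argo_alb.value) are the
        # ALB hostnames once the ingresses are ready; the nested CloudFront stack
        # can use them as custom origin domain names.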

        # 8. (OPTIONAL) Send anonymous solution metrics to AWS.
        # Turn this off in the CloudFormation mapping section if preferred.
        send_metrics = solution_metrics.SendAnonymousData(
            self,
            "SendMetrics",
            network_sg.vpc,
            self.app_s3.artifact_bucket,
            self.app_s3.s3_deploy_contrust,
            metrics={
                "Solution":
                solution_id,
                "Region":
                core.Aws.REGION,
                "SolutionVersion":
                version,
                "UUID":
                "MY_UUID",
                "UseDataLakeBucket":
                "True" if not datalake_bucket.value_as_string else "False",
                "UseAWSCICD":
                "True" if ecr_image.image_uri else "False",
                "NoAZs":
                len(network_sg.vpc.availability_zones)
            })
        send_metrics.node.add_dependency(self.app_s3.s3_deploy_contrust)

        # 9. (OPTIONAL) Suppress cfn_nag rules for the AWS Solution CI/CD deployment.
        # Remove this section if your CI/CD pipeline doesn't use the cfn_nag utility to validate the CloudFormation template.
        k8s_ctl_node = self.node.find_child(
            '@aws-cdk--aws-eks.KubectlProvider')
        cluster_resrc_node = self.node.find_child(
            '@aws-cdk--aws-eks.ClusterResourceProvider')
        scan.suppress_cfnnag_rule(
            'W12', 'by default the role has * resource',
            self.node.find_child('eks_cluster').node.find_child('EKS').node.
            default_child.node.find_child('CreationRole').node.find_child(
                'DefaultPolicy').node.default_child)
        scan.suppress_cfnnag_rule(
            'W11', 'by default the role has * resource',
            self.node.find_child(
                'Custom::AWSCDKOpenIdConnectProviderCustomResourceProvider').
            node.find_child('Role'))
        scan.suppress_lambda_cfnnag_rule(
            k8s_ctl_node.node.find_child('Handler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            k8s_ctl_node.node.find_child('Provider').node.find_child(
                'framework-onEvent').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'Custom::CDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756C').
            node.default_child)
        # scan.suppress_lambda_cfnnag_rule(self.node.find_child('Custom::S3AutoDeleteObjectsCustomResourceProvider').node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'Custom::AWSCDKOpenIdConnectProviderCustomResourceProvider').
            node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child('AWSCDKCfnUtilsProviderCustomResourceProvider'
                                 ).node.find_child('Handler'))
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child(
                'OnEventHandler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child(
                'IsCompleteHandler').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-isComplete').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-onTimeout').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            cluster_resrc_node.node.find_child('Provider').node.find_child(
                'framework-onEvent').node.default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child('eks_cluster').node.find_child('EKS').node.
            find_child('ControlPlaneSecurityGroup').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child('SendMetrics').node.find_child(
                'LambdaProvider').node.find_child(
                    'framework-onEvent').node.default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child('SendMetrics').node.find_child(
                'LambdaProvider').node.find_child('framework-onEvent').node.
            find_child('SecurityGroup').node.default_child)
        scan.suppress_lambda_cfnnag_rule(
            self.node.find_child(
                'SingletonLambda75248a819138468c9ba1bca6c7137599').node.
            default_child)
        scan.suppress_network_cfnnag_rule(
            self.node.find_child(
                'SingletonLambda75248a819138468c9ba1bca6c7137599').node.
            find_child('SecurityGroup').node.default_child)
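# --- Sketch: the load_yaml_local / load_yaml_replace_var_local helpers ---
# The helm values and Kubernetes manifests above are rendered by two
# project-local helpers whose implementations are not part of this example.
# A minimal sketch of what they might look like, assuming they only do plain
# string substitution of the {{placeholder}} tokens before parsing the YAML;
# the signatures mirror the calls above, everything else is an assumption.
import yaml  # assumption: PyYAML is available to the CDK app


def load_yaml_local(yaml_file, multi_resource=False):
    # Parse a local YAML file; return a list of documents when the file
    # contains multiple Kubernetes resources separated by '---'.
    with open(yaml_file) as f:
        if multi_resource:
            return list(yaml.safe_load_all(f))
        return yaml.safe_load(f)


def load_yaml_replace_var_local(yaml_file, fields, multi_resource=False):
    # Replace every placeholder token with its value, then parse as above.
    with open(yaml_file) as f:
        content = f.read()
    for placeholder, value in fields.items():
        content = content.replace(placeholder, str(value))
    if multi_resource:
        return list(yaml.safe_load_all(content))
    return yaml.safe_load(content)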
Example #15
	def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		maps = []
		self.roles = []

		ecr_policy = iam.PolicyStatement(
			actions=[
				"ecr:DescribeImages",
				"ecr:ListImages",
				"ecr:BatchDeleteImage"
			], 
			effect=iam.Effect.ALLOW, 
			resources=[
				"arn:aws:ecr:%s:%s:repository/%s" % (core.Stack.of(self).region, core.Stack.of(self).account, namespace) for namespace in self.node.try_get_context("kubernetes")['namespaces']
			]
		)

		function = lbd.SingletonFunction(
			self,
			"ECRDeleteImagesFunction",
			uuid="19411b0e-0e80-4ad4-a316-3235940775e4",
			code=lbd.Code.from_asset(
				"custom_resources/kubernetes/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="kubernetesConfig",
			initial_policy=[ecr_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			timeout=core.Duration.seconds(30)
		)

		provider = cr.Provider(
			self, "ECRDeleteImagesFunctionProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)


		repositories = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']: 
			manifest = cluster.add_manifest(
				"eksConfigNamespace-%s" % namespace,
				{
					"apiVersion": "v1",
					"kind": "Namespace",
					"metadata": {
						"name": namespace
					}
				}
			)

			sa = cluster.add_service_account(
				"service-account-%s" % namespace,
				name="statement-demo",
				namespace=namespace
			)
			sa.node.add_dependency(manifest)
			self.roles.append(sa.role)

			repository = ecr.Repository(
				self, ("repository-%s" % namespace),
				removal_policy=core.RemovalPolicy.DESTROY,
				repository_name=namespace,
				lifecycle_rules=[ecr.LifecycleRule(max_image_count=1)]
			)

			repositories.append(repository.repository_arn)

			maps.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "application.properties",
					"namespace": namespace
				},
				"data": {
					"application-aws.properties":  Path("../%s/src/main/resources/application-aws.properties" % namespace).read_text()
				}
			})

			core.CustomResource(
				self, "ECRDeleteImagesFunction-%s" % namespace, 
				service_token=provider.service_token,
				properties={
					"repository": namespace
				}
			).node.add_dependency(repository)

		eks.KubernetesManifest(
			self, 
			"eksConfigMaps", 
			cluster=cluster, 
			manifest=maps
		)

		iam.Policy(
			self, "saPolicy", 
			force=True, 
			policy_name="EKSSAPolicy", 
			roles=self.roles, 
			statements=[
				iam.PolicyStatement(
					actions=["cloudwatch:PutMetricData"], 
					conditions={
						"StringEquals": {
							"cloudwatch:namespace": "statement12"
						},
					},
					resources=["*"]
				)
			]
		)
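# --- Sketch: custom_resources/kubernetes/config.py (assumed contents) ---
# The SingletonFunction above loads its code from custom_resources/kubernetes/
# with the handler "config.handler"; the asset itself is not shown in this
# example. A minimal handler for the CDK Provider framework might look like
# the following: it empties the ECR repository named in the resource
# properties when the custom resource is deleted, so the repository can be
# removed cleanly. The boto3 calls are real APIs; the file layout and the
# physical resource id scheme are assumptions.
import boto3


def handler(event, context):
    repository = event['ResourceProperties']['repository']
    if event['RequestType'] == 'Delete':
        ecr = boto3.client('ecr')
        image_ids = ecr.list_images(repositoryName=repository)['imageIds']
        if image_ids:
            ecr.batch_delete_image(repositoryName=repository,
                                   imageIds=image_ids)
    # The Provider framework only requires a physical resource id in return.
    return {'PhysicalResourceId': 'ecr-cleanup-%s' % repository}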
    def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
                 kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Vendor the kafka-python client into the custom resource asset
        # directory at synth time so the Lambda below can import it.
        pip.main([
            "install", "--system", "--target", "custom_resources/kafka/lib",
            "kafka-python"
        ])
        arn = cr.AwsCustomResource(
            self,
            'clusterArn',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listClusters',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ClusterNameFilter"),
                parameters={
                    "ClusterNameFilter": kafka.cluster_name,
                    "MaxResults": 1
                },
            ),
        )

        bootstraps = cr.AwsCustomResource(
            self,
            'clusterBootstraps',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
            on_create=cr.AwsSdkCall(
                action='getBootstrapBrokers',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
                parameters={
                    "ClusterArn":
                    arn.get_response_field("ClusterInfoList.0.ClusterArn")
                },
            ),
        )

        manifests = []
        for namespace in self.node.try_get_context("kubernetes")['namespaces']:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "kafka",
                    "namespace": namespace
                },
                "data": {
                    "bootstrap":
                    bootstraps.get_response_field('BootstrapBrokerStringTls'),
                }
            })
        eks.KubernetesManifest(self,
                               "kafka-config",
                               cluster=cluster,
                               manifest=manifests)

        function = lbd.SingletonFunction(
            self,
            "KafkaConfigFunction",
            uuid="b09329a3-5206-46f7-822f-337da714aeac",
            code=lbd.Code.from_asset("custom_resources/kafka/"),
            handler="config.handler",
            runtime=lbd.Runtime.PYTHON_3_7,
            function_name="kafkaConfig",
            log_retention=logs.RetentionDays.ONE_DAY,
            security_group=ec2.SecurityGroup.from_security_group_id(
                self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
            timeout=core.Duration.seconds(30),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(one_per_az=True))

        provider = cr.Provider(self,
                               "KafkaConfigProvider",
                               on_event_handler=function,
                               log_retention=logs.RetentionDays.ONE_DAY)

        core.CustomResource(
            self,
            "KafkaLoadTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "load",
                "partitions":
                150,
                "replicas":
                1
            })

        core.CustomResource(
            self,
            "KafkaGenerateTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "generate",
                "partitions":
                200,
                "replicas":
                1
            })
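# --- Sketch: custom_resources/kafka/config.py (assumed contents) ---
# The KafkaConfigFunction above loads custom_resources/kafka/ with the handler
# "config.handler", and the kafka-python package is vendored into the lib/
# sub-directory at synth time (see the pip.main call). The asset itself is not
# shown in this example. A minimal handler that creates the requested topic on
# a Create event might look like the following; the kafka-python admin API
# calls are real, the file layout and error handling are assumptions.
import os
import sys

# pick up the vendored kafka-python package next to this file
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))

from kafka.admin import KafkaAdminClient, NewTopic


def handler(event, context):
    props = event['ResourceProperties']
    if event['RequestType'] == 'Create':
        admin = KafkaAdminClient(
            bootstrap_servers=props['bootstrap'].split(','),
            security_protocol='SSL')  # MSK TLS bootstrap string
        admin.create_topics([
            NewTopic(name=props['topic'],
                     num_partitions=int(props['partitions']),
                     replication_factor=int(props['replicas']))
        ])
        admin.close()
    return {'PhysicalResourceId': 'kafka-topic-%s' % props['topic']}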