Example #1
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

import iam

# IAM roles for the node groups.
role0 = iam.create_role("example-role0")
role1 = iam.create_role("example-role1")
role2 = iam.create_role("example-role2")

# Create an EKS cluster.
cluster = eks.Cluster("example-managed-nodegroups",
                      skip_default_node_group=True,
                      instance_roles=[role0, role1, role2])

# Export the cluster's kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)

# Create a simple AWS managed node group using a cluster as input and the
# refactored API.
managed_node_group0 = eks.ManagedNodeGroup(
    "example-managed-ng0",
    cluster=cluster.core,  # TODO[pulumi/pulumi-eks#483]: Pass cluster directly.
    node_role=role0)

# Create a simple AWS managed node group using a cluster as input and the
# initial API.
managed_node_group1 = eks.ManagedNodeGroup(
    "example-managed-ng1",
    # NOTE: the original snippet is truncated here; the remaining arguments
    # are reconstructed to mirror the node group above.
    cluster=cluster.core,
    node_role=role1)
Example #2
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

from vpc import Vpc

project_name = pulumi.get_project()

# Create an EKS cluster with the default configuration.
cluster1 = eks.Cluster(f"{project_name}-1")

# Create an EKS cluster with a non-default configuration.
# TODO specify tags: { "Name": f"{project_name}-2" }
vpc = Vpc(f"{project_name}-2")

cluster2 = eks.Cluster("eks-cluster",
                       vpc_id=vpc.vpc_id,
                       public_subnet_ids=vpc.public_subnet_ids,
                       public_access_cidrs=["0.0.0.0/0"],
                       desired_capacity=2,
                       min_size=2,
                       max_size=2,
                       instance_type="t3.micro",
                       # Set the default storage class.
                       storage_classes={"gp2": eks.StorageClassArgs(
                           type="gp2",
                           allow_volume_expansion=True,
                           default=True,
                           encrypted=True)},
                       enabled_cluster_log_types=[
                           "api",
                           "audit",
                           "authenticator",
                       ])
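# The snippet stops before exporting or deploying anything. A minimal sketch,
# using the cluster's built-in Kubernetes provider (as the later examples do),
# of exporting both kubeconfigs and creating a namespace in cluster2:
import pulumi_kubernetes as k8s

pulumi.export("kubeconfig1", cluster1.kubeconfig)
pulumi.export("kubeconfig2", cluster2.kubeconfig)

# Target cluster2 explicitly by passing its provider to Kubernetes resources.
app_namespace = k8s.core.v1.Namespace(
    "apps",
    opts=pulumi.ResourceOptions(provider=cluster2.provider))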
Example #3
"""An AWS Python Pulumi program"""

import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

base_name = "tushar"
useast2ohio = aws.Provider("useast2ohio", region="us-east-2")

prov_cluster = eks.Cluster(base_name,
                           instance_type="t2.micro",
                           provider_credential_opts=eks.KubeconfigOptionsArgs(
                               profile_name=aws.config.profile),
                           opts=pulumi.ResourceOptions(provider=useast2ohio))

pulumi.export("Cluster_name", prov_cluster.name)
pulumi.export("kubeconfig", prov_cluster.kubeconfig)
Example #4
    tags={"clusterAccess": "admin-usr"},
)

# Create an EKS cluster with a named profile. Map in the new IAM role into
# RBAC for usage after the cluster is running.
#
# Note, the role needs to be mapped into the cluster before it can be used.
# It is omitted from providerCredentialOpts as it will not have access
# to the cluster yet to write the aws-auth configmap for its own permissions.
# See example pod below to use the role once the cluster is ready.
cluster = eks.Cluster(project_name,
                      vpc_id=vpc.vpc_id,
                      public_subnet_ids=vpc.public_subnet_ids,
                      provider_credential_opts=eks.KubeconfigOptionsArgs(
                          profile_name=aws.config.profile),
                      role_mappings=[
                          eks.RoleMappingArgs(
                              groups=["system:masters"],
                              role_arn=cluster_admin_role.arn,
                              username="******",
                          ),
                      ])

# Export the cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)

# Create a role-based kubeconfig with the named profile and the new
# role mapped into the cluster's RBAC.
role_kubeconfig = cluster.get_kubeconfig(
    profile_name=aws.config.profile,
    role_arn=cluster_admin_role.arn,
)
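# The snippet ends before the pod example referenced in the comment above. A
# minimal sketch of using the role-scoped kubeconfig; the provider name and
# pod image are placeholders:
import pulumi_kubernetes as k8s

pulumi.export("roleKubeconfig", role_kubeconfig)

# A Kubernetes provider that authenticates with the admin role's kubeconfig.
role_provider = k8s.Provider("role-provider", kubeconfig=role_kubeconfig)

# Any resource created through role_provider exercises the mapped role.
nginx = k8s.core.v1.Pod(
    "nginx",
    spec=k8s.core.v1.PodSpecArgs(
        containers=[k8s.core.v1.ContainerArgs(name="nginx", image="nginx")]),
    opts=pulumi.ResourceOptions(provider=role_provider))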
Example #5
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

project_name = pulumi.get_project()

# Create an EKS cluster with the default configuration.
cluster1 = eks.Cluster(f"{project_name}-1")

# TODO: create an EKS cluster with a non-default configuration (original
# TypeScript below; a possible Python translation follows after this block).
# const vpc = new awsx.ec2.Vpc(`${projectName}-2`, {
#     tags: { "Name": `${projectName}-2` },
# });

# const cluster2 = new eks.Cluster(`${projectName}-2`, {
#     vpcId: vpc.id,
#     publicSubnetIds: vpc.publicSubnetIds,
#     desiredCapacity: 2,
#     minSize: 2,
#     maxSize: 2,
#     deployDashboard: false,
#     enabledClusterLogTypes: [
#         "api",
#         "audit",
#         "authenticator",
#     ],
# });
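# A possible Python translation of the commented-out TypeScript above,
# assuming pulumi_awsx >= 1.0 for the VPC; deployDashboard is omitted because
# it has been removed from recent pulumi-eks versions.
import pulumi_awsx as awsx

vpc = awsx.ec2.Vpc(f"{project_name}-2",
                   tags={"Name": f"{project_name}-2"})

cluster2 = eks.Cluster(f"{project_name}-2",
                       vpc_id=vpc.vpc_id,
                       public_subnet_ids=vpc.public_subnet_ids,
                       desired_capacity=2,
                       min_size=2,
                       max_size=2,
                       enabled_cluster_log_types=[
                           "api",
                           "audit",
                           "authenticator",
                       ])

pulumi.export("kubeconfig2", cluster2.kubeconfig)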

# Export the clusters' kubeconfig.
pulumi.export("kubeconfig1", cluster1.kubeconfig)
Example #6
import os

import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

project_name = pulumi.get_project()

# For CI testing only: an alternate AWS named profile must be supplied
# through the ALT_AWS_PROFILE environment variable.
if not os.getenv("ALT_AWS_PROFILE"):
    raise Exception("ALT_AWS_PROFILE must be set")

# AWS named profile to use.
profile_name = os.getenv("ALT_AWS_PROFILE")

# Create an AWS provider instance using the named profile creds
# and current region.
aws_provider = aws.Provider("aws-provider",
                            profile=profile_name,
                            region=aws.get_region().name)

# Define the AWS provider credential opts to configure the cluster's
# kubeconfig auth.
kubeconfig_opts = eks.KubeconfigOptionsArgs(profile_name=profile_name)

# Create the cluster using the AWS provider and credential opts.
cluster = eks.Cluster(project_name,
                      provider_credential_opts=kubeconfig_opts,
                      opts=pulumi.ResourceOptions(provider=aws_provider))

# Export the cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)
Example #7
import pulumi_eks as eks
import pulumi_kubernetes as k8s
from pulumi import Output, ResourceOptions, export

from iam import executionRole
from tagger import Tagger  # NOTE: import reconstructed; the "tagger" module name is an assumption.
from vpc import privateSubnet1, privateSubnet2, publicSubnet1, publicSubnet2, vpc

tagger = Tagger(environment_name="dev")

cluster = eks.Cluster(
    resource_name="airflow",
    create_oidc_provider=True,
    fargate=True,
    name="airflow",
    private_subnet_ids=[privateSubnet1.id, privateSubnet2.id],
    public_subnet_ids=[publicSubnet1.id, publicSubnet2.id],
    role_mappings=[
        eks.RoleMappingArgs(
            groups=["system:masters"],
            role_arn=executionRole.arn,
            username="******",
        )
    ],
    version="1.19",
    vpc_id=vpc.id,
    tags=tagger.create_tags(name="airflow"),
)

export("kubeconfig", cluster.kubeconfig)

airflowNamespace = k8s.core.v1.Namespace(
    resource_name="airflow",
    metadata=k8s.meta.v1.ObjectMetaArgs(name="airflow"),
    opts=ResourceOptions(provider=cluster.provider),
)
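# For completeness, a sketch of deploying Airflow into that namespace with the
# official Apache Airflow Helm chart; the chart name, repository URL, and the
# default chart values are assumptions to verify:
airflow_chart = k8s.helm.v3.Chart(
    "airflow",
    k8s.helm.v3.ChartOpts(
        chart="airflow",
        namespace="airflow",
        fetch_opts=k8s.helm.v3.FetchOpts(repo="https://airflow.apache.org"),
    ),
    opts=ResourceOptions(provider=cluster.provider),
)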
Example #8
                    "Condition": {
                        "StringEquals": {
                            Output.concat(oidc_provider.url, ":sub"): Output.concat(
                                "system:serviceaccount:",
                                namespace,
                                ":",
                                service_account,
                            )
                        }
                    },
                }
            ],
        }
    ).apply(json.dumps)

cluster = eks.Cluster("pulumi-bug", name="pulumi-bug", create_oidc_provider=True,)

base_name = f"pulumi-bug-aws-load-balancer-controller"

policy_url = "https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.1.3/docs/install/iam_policy.json"

policy = aws.iam.Policy(base_name, policy=urlopen(policy_url).read().decode("utf8"))

role = aws.iam.Role(
    base_name,
    assume_role_policy=eks_role_policy(
        cluster.core.oidc_provider, "default", "aws-load-balancer-controller"
    ),
)

aws.iam.RolePolicyAttachment(base_name, policy_arn=policy.arn, role=role)
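# For the role to take effect, the service account named in the trust policy
# (default/aws-load-balancer-controller) must carry the IRSA role annotation.
# A minimal sketch:
import pulumi_kubernetes as k8s

service_account = k8s.core.v1.ServiceAccount(
    "aws-load-balancer-controller",
    metadata=k8s.meta.v1.ObjectMetaArgs(
        name="aws-load-balancer-controller",
        namespace="default",
        annotations={"eks.amazonaws.com/role-arn": role.arn},
    ),
    opts=pulumi.ResourceOptions(provider=cluster.provider))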
Example #9
import pulumi
import pulumi_eks as eks

from vpc import Vpc

project_name = pulumi.get_project()

# Create an EKS cluster with the fargate configuration.
vpc = Vpc(project_name)
cluster = eks.Cluster(project_name,
                      vpc_id=vpc.vpc_id,
                      private_subnet_ids=vpc.private_subnet_ids,
                      fargate=True)

# Export the cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)
Example #10
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

from vpc import Vpc

project_name = pulumi.get_project()

# Create an EKS cluster with the default configuration.
cluster1 = eks.Cluster(f"{project_name}-1")

# Create an EKS cluster with a non-default configuration.
vpc = Vpc(f"{project_name}-2")  # TODO specify tags: { "Name": f"{project_name}-2" }

cluster2 = eks.Cluster(f"{project_name}-2",
                       vpc_id=vpc.vpc_id,
                       public_subnet_ids=vpc.public_subnet_ids,
                       desired_capacity=2,
                       min_size=2,
                       max_size=2,
                       enabled_cluster_log_types=[
                           "api",
                           "audit",
                           "authenticator",
                       ])

# Export the clusters' kubeconfig.
pulumi.export("kubeconfig1", cluster1.kubeconfig)
pulumi.export("kubeconfig2", cluster2.kubeconfig)