Example #1
0
 def setup_kubernetes_provider(self):
     """Create a Kubernetes provider bound to this EKS cluster's control plane.

     Builds a kubeconfig from the cluster resource, region, and endpoint via
     ``generateKubectlConfig`` and stores the resulting provider on
     ``self.kube_provider`` so downstream K8s resources can target the cluster.
     """
     # Parent the provider under the EKS cluster in the Pulumi resource tree.
     eks_parent_opts = pulumi.ResourceOptions(
         parent=self.eks_cluster_resource)
     # These values are not known until the cluster is created, hence
     # Output.all(...).apply(...) to defer the kubeconfig generation.
     # NOTE(review): the region "us-west-2" is hard-coded — confirm it
     # should not come from configuration.
     kubectl_config = pulumi.Output.all(
         self.eks_cluster_resource, "us-west-2",
         self.endpoint).apply(generateKubectlConfig)
     self.kube_provider = pulumi_kubernetes.Provider(
         self.cluster_name + "-k8s-provider",
         kubeconfig=kubectl_config,
         # "opts" is the supported keyword; "__opts__" is deprecated.
         opts=eks_parent_opts)
# function.
# That function requires passing values that are not known until the resources are created.
# Thus, the use of "apply()" to wait for those values before calling the function.
def _list_user_credentials(args):
    # args arrives as [resource group name, cluster name] from Output.all.
    rg_name, cluster_name = args
    return containerservice.list_managed_cluster_user_credentials(
        resource_group_name=rg_name,
        resource_name=cluster_name)


creds = pulumi.Output.all(resource_group.name, k8s_cluster.name).apply(
    _list_user_credentials)

# The "list_managed_cluster_user_credentials" function returns an array of base64 encoded kubeconfigs.
# So decode the kubeconfig for our cluster but mark it as a secret so Pulumi treats it accordingly.
def _decode_kubeconfig(encoded):
    # Credentials arrive base64-encoded; decode to plain-text kubeconfig YAML.
    return base64.b64decode(encoded).decode()


kubeconfig = pulumi.Output.secret(
    creds.kubeconfigs[0].value.apply(_decode_kubeconfig))
### End of AKS Cluster Related Resources

# The K8s provider which supplies the helm chart resource needs to know how to talk to the K8s cluster.
# So, instantiate a K8s provider using the retrieved kubeconfig.
# Provider authenticated with the kubeconfig retrieved above; the chart
# resources below are created through it.
k8s_provider = k8s.Provider(
    'k8s-provider',
    kubeconfig=kubeconfig,
)

# Create a chart resource to deploy apache using the k8s provider instantiated above.
# Pin the Bitnami apache chart at a fixed version and deploy it through the
# provider created above.
apache_chart_opts = ChartOpts(
    chart='apache',
    version='8.3.2',
    fetch_opts={'repo': 'https://charts.bitnami.com/bitnami'},
)
apache = Chart(
    'apache-chart',
    apache_chart_opts,
    opts=ResourceOptions(provider=k8s_provider),
)

# Get the helm-deployed apache service IP which isn't known until the chart is deployed.
def _first_ingress_ip(service):
    # The first entry of the load-balancer ingress list carries the IP.
    return service.status.load_balancer.ingress[0].ip


apache_service_ip = apache.get_resource('v1/Service', 'apache-chart').apply(
    _first_ingress_ip)

# Correct option using "concat()"
Example #3
0
import pulumi
import pulumi_kubernetes as kubernetes

from cluster import aks

# Provider that talks to the AKS cluster via its raw kubeconfig output.
k8s_provider = kubernetes.Provider(
    "aks-k8s-provider",
    kubeconfig=aks.kube_config_raw,
)

# Name shared by the backend Deployment metadata and its selector labels below.
backend_name = "azure-vote-back"
# Standard Redis TCP port.
REDIS_PORT = 6379

azure_vote_back_deployment = kubernetes.apps.v1.Deployment(
    "azure_vote_backDeployment",
    api_version="apps/v1",
    kind="Deployment",
    metadata={
        "name": backend_name,
    },
    spec={
        "replicas": 1,
        "selector": {
            "match_labels": {
                "app": backend_name,
            },
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": backend_name,
                },
            },
Example #4
0
from pulumi_kubernetes.core.v1 import Namespace
from pulumi_kubernetes.storage.v1 import StorageClass

# Get the stack
stack = pulumi.get_stack()
sr = "jaxxstorm/cluster/{}".format(stack)
stack_ref = pulumi.StackReference(sr)
# Get the kubeconfig from the stack
kubeconfig = stack_ref.get_output("kubeConfig")

# Get configuration options
config = pulumi.Config()
namespace = config.require("namespace")

# Set up the provider
provider = k8s.Provider("home.lbrlabs", kubeconfig=kubeconfig)

# Create the namespace
ns = Namespace(
    "ns",
    metadata={
        "name": namespace,
    },
    opts=pulumi.ResourceOptions(provider=provider),
)

# Install the helm chart
helm.Chart(
    "local-volume-provisioner",
    helm.LocalChartOpts(path="charts/provisioner",
                        namespace=ns.metadata["name"],
Example #5
0
               prefs='{}',
               expToken='{.credential.token_expiry}',
               tokenKey='{.credential.access_token}')

    return config


# Gather the pieces generate_k8_config consumes: cluster CA certificate, API
# endpoint, and a kubectl context name of the form <project>_<zone>_<cluster>.
gke_masterAuth = cluster.master_auth['clusterCaCertificate']
gke_endpoint = cluster.endpoint
gke_context = '_'.join([gcp_project, gcp_zone, cluster_name])

# All three inputs are Outputs, so wait for them to resolve before rendering
# the kubeconfig text.
k8s_config = pulumi.Output.all(
    gke_masterAuth,
    gke_endpoint,
    gke_context,
).apply(lambda resolved: generate_k8_config(*resolved))

# Provider bound to the generated kubeconfig for the new GKE cluster.
cluster_provider = pulumi_kubernetes.Provider(cluster_name,
                                              kubeconfig=k8s_config)
# Use the supported "opts" keyword; "__opts__" is deprecated.
ns = Namespace(cluster_name,
               opts=ResourceOptions(provider=cluster_provider))

gke_deployment = Deployment(
    app_name,
    metadata={
        'namespace': ns,
        'labels': app_label,
    },
    spec={
        'replicas': 3,
        'selector': {
            'matchLabels': app_label
        },
        'template': {
# Exercise 1
# Create a cluster resource using our custom component resource class
# NOTE(review): this rebinds the name "cluster" from the imported module to
# the new instance, shadowing the module for the rest of the file. Later code
# reads attributes off the instance (e.g. cluster.kubeconfig), so renaming
# would have to be coordinated with every caller.
cluster = cluster.Cluster(
    'k8scluster',
    cluster.ClusterArgs(
        resource_group_name=resource_group.name,
        password=password,
        node_count=node_count,
        node_size=node_size,
        k8s_version=k8s_version,
        admin_username=admin_username,
    ))

# The K8s provider which supplies the helm chart resource needs to know how to talk to the K8s cluster.
# So, instantiate a K8s provider using the retrieved kubeconfig.
# Provider for the helm chart below, using the component's kubeconfig output.
k8s_provider = k8s.Provider(
    'k8s-provider',
    kubeconfig=cluster.kubeconfig,
)

# Create a chart resource to deploy apache using the k8s provider instantiated above.
# Deploy the Bitnami apache chart, pinned to a fixed version, through the
# provider created above.
bitnami_apache_opts = ChartOpts(
    chart='apache',
    version='8.3.2',
    fetch_opts={'repo': 'https://charts.bitnami.com/bitnami'},
)
apache = Chart(
    'apache-chart',
    bitnami_apache_opts,
    opts=ResourceOptions(provider=k8s_provider),
)

# Get the helm-deployed apache service IP which isn't known until the chart is deployed.
def _service_ingress_ip(svc):
    # The first load-balancer ingress entry holds the service's external IP.
    return svc.status.load_balancer.ingress[0].ip


apache_service_ip = apache.get_resource('v1/Service', 'apache-chart').apply(
    _service_ingress_ip)

# Correct option using "concat()"
Example #7
0
                              username="******",
                          ),
                      ])

# Export the plain cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)

# Build a second kubeconfig that uses the named AWS profile and the role
# mapped into the cluster's RBAC, and export it as well.
role_kubeconfig = cluster.get_kubeconfig(
    profile_name=aws.config.profile,
    role_arn=cluster_admin_role.arn,
)
pulumi.export("roleKubeconfig", role_kubeconfig)

# Provider driven by the role-based kubeconfig; depends_on ensures the
# cluster's own provider exists first.
role_provider = k8s.Provider(
    "provider",
    kubeconfig=role_kubeconfig,
    opts=pulumi.ResourceOptions(depends_on=[cluster.provider]),
)

# Create a pod with the role-based kubeconfig.
pod = k8s.core.v1.Pod(
    "nginx",
    spec=k8s.core.v1.PodSpecArgs(containers=[
        k8s.core.v1.ContainerArgs(
            name="nginx",
            image="nginx",
            ports=[
                k8s.core.v1.ContainerPortArgs(name="http", container_port=80)
            ],
        )
    ], ),
    opts=pulumi.ResourceOptions(provider=role_provider),