def eks_stuff():
    """Provision an EKS cluster and deploy nginx onto it.

    Creates a ServiceAccount in kube-system and a single-replica nginx
    Deployment, both routed through the new cluster's Kubernetes provider.
    """
    eks_cluster = EKSCluster("jeid-test-160")
    provider = eks_cluster.kube_provider
    app_labels = {"app": "nginx"}
    # Route child k8s resources through the new cluster's provider rather
    # than the ambient kubeconfig context.
    provresopts = pulumi.ResourceOptions(provider=provider)
    service_account = ServiceAccount(
        "test-service-account2",
        metadata={
            "namespace": "kube-system",
            "labels": app_labels
        },
        opts=provresopts)
    deployment = Deployment(
        "nginx",
        spec={
            "selector": {
                "match_labels": app_labels
            },
            "replicas": 1,
            "template": {
                "metadata": {
                    "labels": app_labels
                },
                "spec": {
                    "containers": [{
                        "name": "nginx",
                        "image": "nginx"
                    }],
                    # BUG FIX: key normalized to snake_case to match the other
                    # keys in this spec; the mixed-case 'serviceAccountName'
                    # was the marked break point.
                    "service_account_name": service_account.metadata["name"]
                }
            }
        },
        # BUG FIX: opts was missing, so the Deployment targeted the default
        # kubeconfig instead of the EKS cluster created above.
        opts=provresopts)
def __init__(self, name: str, replicas: int = 1, image: str = None,
             ports: "list[int]" = None, envvars: "list[dict]" = None,
             opts: ResourceOptions = None):
    """Component bundling a Deployment with a LoadBalancer Service.

    :param name: base name; also used as the ``app`` label value.
    :param replicas: desired pod count.
    :param image: container image to run.
    :param ports: ports exposed on both the container and the service.
    :param envvars: environment-variable dicts passed to the container.
    :param opts: standard Pulumi resource options.
    """
    super().__init__("my:modules:SimpleDeployment", name, {}, opts)
    # Shared label set linking the Deployment's pods to the Service selector.
    labels = {"app": name}
    container = {
        "name": name,
        "image": image,
        "ports": [{
            "container_port": p
        } for p in ports] if ports else None,
        "env": envvars,
    }
    self.deployment = Deployment(name, spec={
        "selector": {
            "match_labels": labels
        },
        "replicas": replicas,
        "template": {
            "metadata": {
                "labels": labels
            },
            "spec": {
                "containers": [container]
            },
        },
    }, opts=ResourceOptions(parent=self))
    self.service = Service(
        name,
        metadata={
            "name": name,
            # Reuse the deployment's resolved labels so both stay in sync.
            "labels": self.deployment.metadata['labels'],
        },
        spec={
            "ports": [{
                "port": p,
                "targetPort": p
            } for p in ports] if ports else None,
            # Select the pods created by the deployment above.
            "selector": self.deployment.spec['template']['metadata']['labels'],
            "type": "LoadBalancer",
        },
        opts=ResourceOptions(parent=self))
    self.register_outputs({})
def __init__(self, name: str, image: str, resources: ResourceRequirementsArgs = None,
             replicas: int = None, ports: Sequence[int] = None,
             allocate_ip_address: bool = None, is_minikube: bool = None,
             opts: ResourceOptions = None):
    """Component bundling a Deployment with a matching Service.

    :param name: base name; also used as the ``app`` label value.
    :param image: container image to run.
    :param resources: container resource requirements; defaults to a small request.
    :param replicas: desired pod count (defaults to 1).
    :param ports: ports exposed on both the container and the service.
    :param allocate_ip_address: when truthy, expose the service externally.
    :param is_minikube: use ClusterIP instead of LoadBalancer on minikube.
    :param opts: standard Pulumi resource options.
    """
    super().__init__('k8sx:component:ServiceDeployment', name, {}, opts)
    labels = {"app": name}
    container = ContainerArgs(
        name=name,
        image=image,
        # Small default request so pods schedule on modest clusters.
        resources=resources or ResourceRequirementsArgs(
            requests={
                "cpu": "100m",
                "memory": "100Mi"
            },
        ),
        ports=[ContainerPortArgs(container_port=p) for p in ports] if ports else None,
    )
    self.deployment = Deployment(
        name,
        spec=DeploymentSpecArgs(
            selector=LabelSelectorArgs(match_labels=labels),
            replicas=replicas if replicas is not None else 1,
            template=PodTemplateSpecArgs(
                metadata=ObjectMetaArgs(labels=labels),
                spec=PodSpecArgs(containers=[container]),
            ),
        ),
        opts=pulumi.ResourceOptions(parent=self))
    self.service = Service(
        name,
        metadata=ObjectMetaArgs(
            name=name,
            labels=self.deployment.metadata.apply(lambda m: m.labels),
        ),
        spec=ServiceSpecArgs(
            ports=[ServicePortArgs(port=p, target_port=p) for p in ports] if ports else None,
            selector=self.deployment.spec.apply(lambda s: s.template.metadata.labels),
            # Minikube cannot provision cloud load balancers; omit the type
            # entirely when no external address was requested.
            type=("ClusterIP" if is_minikube else "LoadBalancer") if allocate_ip_address else None,
        ),
        opts=pulumi.ResourceOptions(parent=self))
    if allocate_ip_address:
        if is_minikube:
            self.ip_address = self.service.spec.apply(lambda s: s.cluster_ip)
        else:
            ingress = self.service.status.apply(lambda s: s.load_balancer.ingress[0])
            # BUG FIX: the lambda previously closed over the outer `ingress`
            # Output instead of its resolved argument `i`, so the hostname
            # fallback could never take effect.
            self.ip_address = ingress.apply(lambda i: i.ip or i.hostname or "")
    self.register_outputs({})
def __init__(self, name, args, opts=None):
    """Component pairing a Deployment with a Service.

    :param name: base name for the component and its children; also the
        ``app`` label value.
    :param args: option dict; keys read here are ``replicas``, ``image``,
        ``serviceType``, ``port`` and ``protocol``.
    :param opts: standard Pulumi resource options.
    """
    super().__init__("ServiceDeployment", name, {}, opts)
    self.name = name
    # Shared label set linking the Deployment's pods to the Service selector.
    self.labels = {"app": name}
    # NOTE(review): children are not parented to this component (no
    # ResourceOptions(parent=self)) and register_outputs() is never called —
    # confirm whether that is intentional.
    self.deployment = Deployment(name, spec={
        "selector": {
            "match_labels": self.labels
        },
        "replicas": args.get("replicas", 1),
        "template": {
            "metadata": {
                "labels": self.labels
            },
            "spec": {
                "containers": [{
                    "name": self.name,
                    "image": args.get("image")
                }]
            }
        }
    })
    self.service = Service(name, spec={
        # Defaults to ClusterIP unless the caller asks for another type.
        "type": args.get("serviceType", "ClusterIP"),
        "ports": [{
            "port": args.get("port"),
            "targetPort": args.get("port"),
            "protocol": args.get("protocol", "TCP")
        }],
        "selector": self.labels
    })
    # NOTE(review): this export assumes a load balancer with an ingress
    # hostname; with the ClusterIP default no ingress entry will exist —
    # verify against callers.
    pulumi.export(
        "frontendIp",
        self.service.status["load_balancer"]["ingress"][0]["hostname"])
isMinikube = config.get_bool("isMinikube") redis_leader_labels = { "app": "redis-leader", } redis_leader_deployment = Deployment( "redis-leader", spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=redis_leader_labels, ), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=redis_leader_labels, ), spec=PodSpecArgs(containers=[ ContainerArgs( name="redis-leader", image="redis", resources=ResourceRequirementsArgs(requests={ "cpu": "100m", "memory": "100Mi", }, ), ports=[ContainerPortArgs(container_port=6379, )], ) ], ), ), )) redis_leader_service = Service( "redis-leader", metadata=ObjectMetaArgs(name="redis-leader", labels=redis_leader_labels), spec=ServiceSpecArgs( ports=[ServicePortArgs(
def __init__(self, name: str, image: str, resources: dict = None,
             replicas: int = None, ports: List[int] = None,
             allocate_ip_address: bool = None, is_minikube: bool = None,
             opts: ResourceOptions = None):
    """Component bundling a Deployment with a matching Service.

    :param name: base name; also used as the ``app`` label value.
    :param image: container image to run.
    :param resources: container resource spec; defaults to a small request.
    :param replicas: desired pod count (defaults to 1).
    :param ports: ports exposed on both the container and the service.
    :param allocate_ip_address: when truthy, expose the service externally.
    :param is_minikube: use ClusterIP instead of LoadBalancer on minikube.
    :param opts: standard Pulumi resource options.
    """
    super().__init__('k8sx:component:ServiceDeployment', name, {}, opts)
    labels = {"app": name}
    container = {
        "name": name,
        "image": image,
        # Small default request so pods schedule on modest clusters.
        "resources": resources or {
            "requests": {
                "cpu": "100m",
                "memory": "100Mi"
            }
        },
        "ports": [{
            "container_port": p
        } for p in ports] if ports else None,
    }
    self.deployment = Deployment(name, spec={
        "selector": {
            "match_labels": labels
        },
        # BUG FIX: was hard-coded to 1, silently ignoring the replicas
        # parameter; None still defaults to 1, so existing callers keep
        # their behavior.
        "replicas": replicas if replicas is not None else 1,
        "template": {
            "metadata": {
                "labels": labels
            },
            "spec": {
                "containers": [container]
            },
        },
    }, opts=pulumi.ResourceOptions(parent=self))
    self.service = Service(
        name,
        metadata={
            "name": name,
            "labels": self.deployment.metadata['labels'],
        },
        spec={
            "ports": [{
                "port": p,
                "targetPort": p
            } for p in ports] if ports else None,
            "selector": self.deployment.spec['template']['metadata']['labels'],
            # Minikube cannot provision cloud load balancers.
            "type": ("ClusterIP" if is_minikube else "LoadBalancer") if allocate_ip_address else None,
        },
        opts=pulumi.ResourceOptions(parent=self))
    if allocate_ip_address:
        if is_minikube:
            self.ip_address = self.service.spec['clusterIP']
        else:
            ingress = self.service.status['load_balancer']['ingress'][0]
            # BUG FIX: the lambda previously closed over the outer `ingress`
            # Output instead of its resolved argument `i`, so the hostname
            # fallback could never take effect.
            self.ip_address = ingress.apply(lambda i: i["ip"] if "ip" in i else i['hostname'])
    self.register_outputs({})
# Create a NGINX Deployment appLabels = {"appClass": name} deployment = Deployment(name, metadata={"labels": appLabels}, spec={ "selector": { "match_labels": appLabels }, "replicas": 1, "template": { "metadata": { "labels": appLabels }, "spec": { "containers": [{ "name": name, "image": "nginx", "ports": [{ "name": "http", "containerPort": 80 }] }] } } }, __opts__=ResourceOptions(provider=custom_provider)) # Create nginx service service = Service(name,
token-key: '{{.credential.access_token}}' name: gcp """.format(info[2]['clusterCaCertificate'], info[1], '{0}_{1}_{2}'.format( project, zone, info[0]))) # Make a Kubernetes provider instance that uses our cluster from above. k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config) # Create a canary deployment to test that this cluster works. labels = {'app': 'canary-{0}-{1}'.format(get_project(), get_stack())} canary = Deployment( 'canary', spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=labels), spec=PodSpecArgs( containers=[ContainerArgs(name='nginx', image='nginx')]), ), ), __opts__=ResourceOptions(provider=k8s_provider)) ingress = Service('ingress', spec=ServiceSpecArgs( type='LoadBalancer', selector=labels, ports=[ServicePortArgs(port=80)], ), __opts__=ResourceOptions(provider=k8s_provider)) # Finally, export the kubeconfig so that the client can easily access the cluster.
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.

import pulumi
from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs
from pulumi_kubernetes.core.v1 import ContainerArgs, ContainerPortArgs, PodSpecArgs, PodTemplateSpecArgs
from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs

config = pulumi.Config()
nginxLabels = {"app": "nginx"}

# IMPROVEMENT: read the optional replica count once; the config lookup was
# previously performed twice in the same expression.
replicas = config.get_int("replicas")

# nginx Deployment, defaulting to 2 replicas when none is configured
# (a configured value of 0 is respected).
nginxDeployment = Deployment(
    "nginx-deployment",
    spec=DeploymentSpecArgs(
        selector=LabelSelectorArgs(match_labels=nginxLabels),
        replicas=2 if replicas is None else replicas,
        template=PodTemplateSpecArgs(
            metadata=ObjectMetaArgs(labels=nginxLabels),
            spec=PodSpecArgs(containers=[
                ContainerArgs(
                    name="nginx",
                    image="nginx:1.7.9",
                    ports=[ContainerPortArgs(container_port=80)],
                )
            ], ),
        ),
    ))

# Export the auto-generated deployment name as a stack output.
pulumi.export("nginx", nginxDeployment.metadata.apply(lambda m: m.name))
spec = \ { "containers": [flask_container], "imagePullSecrets": [ecr_pull_secret] } deployment = Deployment("flask", spec={ "selector": { "match_labels": app_labels }, "replicas": 1, "template": { "metadata": { "labels": app_labels }, "spec": spec } }) frontend = Service("flask", metadata={ "labels": deployment.spec["template"]["metadata"]["labels"], }, spec={ "type": "LoadBalancer",
# running on minikube, and if so, create only services of type ClusterIP. config = pulumi.Config() is_minikube = config.require_bool("isMinikube") app_name = "nginx" app_labels = {"app": "nginx"} deployment = Deployment("nginx", spec={ "selector": { "match_labels": app_labels }, "replicas": 1, "template": { "metadata": { "labels": app_labels }, "spec": { "containers": [{ "name": app_name, "image": "nginx" }] } } }) port = {"port": 80, "target_port": 80, "protocol": "TCP"} if is_minikube: port.update(node_port=30000) frontend = Service(app_name,
), ) k8s_provider = Provider( "k8s", kubeconfig=aks.kube_config_raw, ) labels = {"app": "nginx"} nginx = Deployment( "k8s-nginx", spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=labels), spec=PodSpecArgs( containers=[ContainerArgs(name="nginx", image="nginx")]), ), ), opts=ResourceOptions(parent=k8s_provider, provider=k8s_provider), ) ingress = Service( "k8s-nginx", spec=ServiceSpecArgs(type="LoadBalancer", selector=labels, ports=[ServicePortArgs(port=80)]), opts=ResourceOptions(parent=k8s_provider, provider=k8s_provider), )
def __init__(self, name, credentials, resources, image=None, opts=None): super(Instance, self).__init__("jenkins:jenkins:Instance", name, { "credentials": credentials, "resources": resources, "image": image }, opts) # The Secret will contain the root password for this instance. secret = Secret( name + "-secret", metadata={ "name": name, }, type="Opaque", data={ "jenkins-password": str( base64.b64encode(bytes(credentials["password"], "utf-8"), None), "utf-8"), }, opts=ResourceOptions(parent=self), ) # The PVC provides persistent storage for Jenkins states. pvc = PersistentVolumeClaim( name + "-pvc", metadata={ "name": name, }, spec={ "accessModes": ["ReadWriteOnce"], "resources": { "requests": { "storage": "8Gi", }, }, }, opts=ResourceOptions(parent=self), ) # The Deployment describes the desired state for our Jenkins setup. deploymentArgs = createDeploymentArgs(name, credentials, resources, image) deployment = Deployment( name + "-deploy", metadata=deploymentArgs["metadata"], spec=deploymentArgs["spec"], opts=ResourceOptions(parent=self), ) # The Service exposes Jenkins to the external internet by providing load-balanced ingress for HTTP and HTTPS. service = Service(name + "-service", metadata={ "name": name, }, spec={ "type": "LoadBalancer", "ports": [ { "name": "http", "port": 80, "targetPort": "http", }, { "name": "https", "port": 443, "targetPort": "https", }, ], "selector": { "app": name, }, }, opts=ResourceOptions(parent=self)) # This component resource has no outputs. self.register_outputs({})
mongodb_database = os.getenv('mongodb_database') deployment_demo = Deployment( "demo", metadata=ObjectMetaArgs(name="demo", namespace=namespace_name), spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels={"app": "demo"}), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels={"app": "demo"}), spec=PodSpecArgs( containers=[ ContainerArgs(name="demo", image=docker_registry_repository + ':latest', env=[ EnvVarArgs(name='mongodb_host', value=mongodb_host), EnvVarArgs(name='mongodb_port', value=mongodb_port), EnvVarArgs(name='mongodb_username', value=mongodb_username), EnvVarArgs(name='mongodb_password', value=mongodb_password), EnvVarArgs(name='mongodb_database', value=mongodb_database) ]) ], image_pull_secrets=[ LocalObjectReferenceArgs(name=image_pull_secret) ])))) demo_service = Service("demo",
app_labels = {"app": "hello-k8s"} deployment = Deployment( "hello-k8s", spec={ "selector": { "match_labels": app_labels }, "replicas": 1, "template": { "metadata": { "labels": app_labels }, "spec": { "containers": [{ "name": "hello-k8s", "image": "chrisley75/hello-kubernetes:v1", "env": [{ "name": "MESSAGE", "value": "Application deployee et geree avec Pulumi" }] }] } } }) frontend = Service(app_name,
# Deployment of the polaris app image, wired to the Cosmos DB resources
# created elsewhere in this program via environment variables.
polaris = Deployment(
    "k8s-polaris-deployment1",
    spec={
        "selector": {
            "matchLabels": labels
        },
        "replicas": 15,
        "template": {
            "metadata": {
                "labels": labels
            },
            "spec": {
                "containers": [{
                    "name": "polaris",
                    "image": DOCKER_REPO_URI,
                    # NOTE(review): the Cosmos account key is injected as a
                    # plain env value; consider a Secret — confirm
                    # requirements.
                    "env": [{
                        "name": "cosmosKey",
                        "value": cosmos_db_account.primary_master_key
                    }, {
                        "name": "cosmosDbAccountName",
                        "value": cosmos_db_account.name
                    }, {
                        # NOTE(review): "cosmodDbDatabaseName" looks like a
                        # typo of "cosmosDbDatabaseName", but the consuming
                        # app may read the misspelled name — verify before
                        # renaming.
                        "name": "cosmodDbDatabaseName",
                        "value": cosmos_db_database.name
                    }, {
                        "name": "cosmosDbCollectionId",
                        "value": cosmos_db_container.name
                    }]
                }]
            },
        },
    },
    # __opts__ is the legacy spelling of opts in older Pulumi SDKs.
    __opts__=ResourceOptions(parent=k8s_provider, provider=k8s_provider),
)
# Two-replica nginx Deployment that mounts the Secret created elsewhere in
# this program as a volume at /etc/secret-volume.
deployment = Deployment("test-deployment", spec={
    "selector": {
        "match_labels": app_labels
    },
    "replicas": 2,
    "template": {
        "metadata": {
            "labels": app_labels
        },
        "spec": {
            "containers": [{
                "name": "nginx",
                "image": "nginx",
                "volumeMounts": [{
                    "name": "secret-volume",
                    "mountPath": "/etc/secret-volume"
                }]
            }],
            "volumes": [{
                "name": "secret-volume",
                "secret": {
                    # References the Secret's auto-generated name (an Output).
                    "secretName": secret.metadata['name']
                }
            }]
        }
    }
})
"""A Kubernetes Python Pulumi program"""
import pulumi
from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs
from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs
from pulumi_kubernetes.core.v1 import ContainerArgs, PodSpecArgs, PodTemplateSpecArgs

app_labels = {"app": "nginx"}

# Pod template: one stock nginx container carrying the app labels.
nginx_pod_template = PodTemplateSpecArgs(
    metadata=ObjectMetaArgs(labels=app_labels),
    spec=PodSpecArgs(containers=[ContainerArgs(name="nginx", image="nginx")]),
)

# Single-replica Deployment whose selector matches the pod labels above.
deployment = Deployment(
    "nginx",
    spec=DeploymentSpecArgs(
        selector=LabelSelectorArgs(match_labels=app_labels),
        replicas=1,
        template=nginx_pod_template,
    ),
)

# Expose the auto-generated deployment name as a stack output.
pulumi.export("name", deployment.metadata["name"])
def create():
    """Install CoreDNS into the kube-system namespace.

    Creates the ServiceAccount, RBAC role/binding, Corefile ConfigMap,
    a two-replica CoreDNS Deployment, and the kube-dns Service pinned to
    a fixed cluster IP.
    """
    # NOTE(review): app_labels is never used below — confirm it can be removed.
    app_labels = {"app": "nginx"}
    # Identity the CoreDNS pods run as.
    serviceAccount = ServiceAccount("coredns",
                                    metadata={
                                        "name": "coredns",
                                        "namespace": "kube-system"
                                    })
    # Cluster-wide read access CoreDNS needs to answer service DNS queries.
    clusterRole = ClusterRole(
        "system:coredns",
        metadata={
            "name": "system:coredns",
            "labels": {
                "kubernetes.io/bootstrapping": "rbac-defaults"
            }
        },
        rules=[{
            "apiGroups": [""],
            "resources": ["endpoints", "services", "pods", "namespaces"],
            "verbs": ["list", "watch"]
        }])
    # Bind the role above to the coredns ServiceAccount.
    clusterRoleBinding = ClusterRoleBinding(
        "system:coredns",
        metadata={
            "name": "system:coredns",
            "labels": {
                "kubernetes.io/bootstrapping": "rbac-defaults"
            },
            "annotations": {
                "rbac.authorization.kubernetes.io/autoupdate": "true"
            }
        },
        role_ref={
            "apiGroup": "rbac.authorization.k8s.io",
            "kind": "ClusterRole",
            "name": "system:coredns"
        },
        subjects=[{
            "kind": "ServiceAccount",
            "name": "coredns",
            "namespace": "kube-system"
        }])
    # Corefile configuration mounted into the CoreDNS pods below.
    configMap = ConfigMap("coredns",
                          metadata={
                              "name": "coredns",
                              "namespace": "kube-system"
                          },
                          data={
                              "Corefile": """.:53 { errors health kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure upstream fallthrough in-addr.arpa ip6.arpa } prometheus :9153 proxy . 
/etc/resolv.conf cache 30 loop reload loadbalance }"""
                          })
    # Two CoreDNS replicas with a rolling-update strategy, tolerating
    # master/critical-addon taints.
    deployment = Deployment(
        "coredns",
        metadata={
            "name": "coredns",
            "namespace": "kube-system",
            "labels": {
                "k8s-app": "kube-dns",
                "kubernetes.io/name": "CoreDNS"
            }
        },
        spec={
            "selector": {
                "matchLabels": {
                    "k8s-app": "kube-dns"
                }
            },
            "replicas": 2,
            "strategy": {
                "type": "RollingUpdate",
                "rollingUpdate": {
                    "maxUnavailable": 1
                }
            },
            "template": {
                "metadata": {
                    "labels": {
                        "k8s-app": "kube-dns"
                    }
                },
                "spec": {
                    "serviceAccountName": "coredns",
                    "tolerations": [
                        {
                            "key": "node-role.kubernetes.io/master",
                            "effect": "NoSchedule"
                        },
                        {
                            "key": "CriticalAddonsOnly",
                            "operator": "Exists"
                        },
                    ],
                    # Mount the Corefile from the ConfigMap created above.
                    "volumes": [{
                        "name": "config-volume",
                        "configMap": {
                            "name": "coredns",
                            "items": [{
                                "key": "Corefile",
                                "path": "Corefile"
                            }]
                        }
                    }],
                    # "Default" inherits the node's DNS config rather than
                    # the cluster's (CoreDNS is itself the cluster DNS).
                    "dnsPolicy": "Default",
                    "containers": [{
                        "name": "coredns",
                        "image": "coredns/coredns:1.2.2",
                        "imagePullPolicy": "IfNotPresent",
                        "resources": {
                            "limits": {
                                "memory": "170Mi"
                            },
                            "requests": {
                                "cpu": "100m",
                                "memory": "70Mi"
                            }
                        },
                        "args": ["-conf", "/etc/coredns/Corefile"],
                        "volumeMounts": [{
                            "name": "config-volume",
                            "mountPath": "/etc/coredns",
                            "readOnly": True
                        }],
                        # DNS over UDP/TCP plus the Prometheus metrics port.
                        "ports": [{
                            "containerPort": 53,
                            "name": "dns",
                            "protocol": "UDP"
                        }, {
                            "containerPort": 53,
                            "name": "dns-tcp",
                            "protocol": "TCP"
                        }, {
                            "containerPort": 9153,
                            "name": "metrics",
                            "protocol": "TCP"
                        }],
                        # Minimal capabilities: only NET_BIND_SERVICE to bind
                        # port 53 as non-root.
                        "securityContext": {
                            "allowPrivilegeEscalation": False,
                            "capabilities": {
                                "add": ["NET_BIND_SERVICE"],
                                "drop": ["all"]
                            }
                        },
                        "livenessProbe": {
                            "httpGet": {
                                "path": "/health",
                                "port": 8080,
                                "scheme": "HTTP"
                            },
                            "initialDelaySeconds": 60,
                            "timeoutSeconds": 5,
                            "successThreshold": 1,
                            "failureThreshold": 5
                        }
                    }]
                }
            }
        })
    # kube-dns Service pinned to the fixed cluster DNS IP; annotated for
    # Prometheus scraping of the metrics port.
    service = Service("kube-dns",
                      metadata={
                          "name": "kube-dns",
                          "namespace": "kube-system",
                          "labels": {
                              "k8s-app": "kube-dns",
                              "kubernetes.io/name": "CoreDNS",
                              "kubernetes.io/cluster-service": "true"
                          },
                          "annotations": {
                              "prometheus.io/port": "9153",
                              "prometheus.io/scrape": "true"
                          }
                      },
                      spec={
                          "selector": {
                              "k8s-app": "kube-dns"
                          },
                          "clusterIP": "10.32.0.10",
                          "ports": [{
                              "port": 53,
                              "name": "dns",
                              "protocol": "UDP"
                          }, {
                              "port": 53,
                              "name": "dns-tcp",
                              "protocol": "TCP"
                          }]
                      })
    #pulumi.export("name", deployment.metadata["name"])
}) k8s_provider = Provider("do-k8s", kubeconfig=cluster.kube_configs[0]["rawConfig"]) app_labels = {"app": "app-nginx"} app = Deployment("do-app-dep", spec={ 'selector': { 'matchLabels': app_labels }, 'replicas': 1, 'template': { 'metadata': { 'labels': app_labels }, 'spec': { 'containers': [{ 'name': 'nginx', 'image': 'nginx' }] }, }, }, __opts__=ResourceOptions(provider=k8s_provider)) ingress = Service('do-app-svc', spec={ 'type': 'LoadBalancer', 'selector': app_labels, 'ports': [{
}, }, })), }, ) # Deploy a load-balanced service that uses this image. labels = { 'app': 'my-app' } dep = Deployment('my-app-dep', spec={ 'selector': { 'matchLabels': labels }, 'replicas': 1, 'template': { 'metadata': { 'labels': labels }, 'spec': { 'containers': [{ 'name': labels['app'], 'image': image.image_name, }], 'image_pull_secrets': [{ 'name': pull_secret.metadata['name'], }], }, }, }) svc = Service('my-app-svc', spec={ 'selector': labels, 'type': 'LoadBalancer', 'ports': [{ 'port': 80 }], }) # Export the resulting image name. pulumi.export('imageName', image.image_name)
# Single-replica SQL Server 2017 (Developer edition) Deployment listening
# on the standard SQL port 1433.
sql_deployment = Deployment(
    name,
    metadata={"labels": appLabels},
    spec={
        "selector": {
            "match_labels": appLabels
        },
        "replicas": 1,
        "template": {
            "metadata": {
                "labels": appLabels
            },
            "spec": {
                "containers": [{
                    "name": name,
                    "image": "mcr.microsoft.com/mssql/server:2017-latest-ubuntu",
                    "ports": [{
                        "name": "sql",
                        "containerPort": 1433
                    }],
                    "env": [{
                        # The EULA must be accepted for the container to start.
                        "name": "ACCEPT_EULA",
                        "value": "Y"
                    }, {
                        # NOTE(review): the SA password is injected as a plain
                        # env value; consider a Kubernetes Secret — confirm
                        # requirements.
                        "name": "SA_PASSWORD",
                        "value": SA_PASSWORD
                    }, {
                        "name": "MSSQL_PID",
                        "value": "Developer"
                    }]
                }]
            }
        }
    },
    # __opts__ is the legacy spelling of opts in older Pulumi SDKs.
    __opts__=ResourceOptions(provider=custom_provider))
app_labels = { "app": "iac-workshop" } app_deployment = Deployment("app-dep", metadata={ "namespace": ns.metadata["name"] }, spec={ "selector": { "match_labels": app_labels, }, "replicas": 1, "template": { "metadata": { "labels": app_labels, }, "spec": { "containers": [{ "name": "iac-workshop", "image": "gcr.io/google-samples/kubernetes-bootcamp:v1", }], }, }, }, opts=ResourceOptions(provider=k8s_provider) ) service = Service("app-service", metadata={ "namespace": ns.metadata["name"],
# Three-replica Deployment of the app image into the target namespace,
# created through the GKE cluster's provider.
gke_deployment = Deployment(
    app_name,
    metadata={
        'namespace': ns,
        'labels': app_label,
    },
    spec={
        'replicas': 3,
        'selector': {
            'matchLabels': app_label
        },
        'template': {
            'metadata': {
                'labels': app_label
            },
            'spec': {
                'containers': [{
                    'name': app_name,
                    'image': docker_image,
                    # The app listens on 5000; the named port lets Services
                    # reference it by name.
                    'ports': [{
                        'name': 'port-5000',
                        'container_port': 5000
                    }]
                }]
            }
        }
    },
    # __opts__ is the legacy spelling of opts in older Pulumi SDKs.
    __opts__=ResourceOptions(provider=cluster_provider))
import pulumi
from pulumi import Config, ResourceOptions
from pulumi_kubernetes import Provider
from pulumi_kubernetes.apps.v1 import Deployment

config = Config()

# Deploy into the configured namespace, falling back to "default".
namespace = config.get("namespace") or "default"
provider = Provider("kubernetes", namespace=namespace)

app_labels = {"app": "nginx"}

# Pod template: one stock nginx container carrying the app labels.
pod_template = {
    "metadata": {"labels": app_labels},
    "spec": {"containers": [{"name": "nginx", "image": "nginx"}]},
}

# Single-replica Deployment routed through the namespaced provider.
deployment = Deployment(
    "nginx",
    spec={
        "selector": {"match_labels": app_labels},
        "replicas": 1,
        "template": pod_template,
    },
    opts=ResourceOptions(provider=provider),
)

# Expose the auto-generated deployment name as a stack output.
pulumi.export("name", deployment.metadata["name"])
# Redis leader: a single replica of the stock redis image with small
# resource requests, exposing the standard redis port.
redis_leader_container = {
    "name": "redis-leader",
    "image": "redis",
    "resources": {
        "requests": {
            "cpu": "100m",
            "memory": "100Mi",
        },
    },
    "ports": [{
        "container_port": 6379,
    }],
}
redis_leader_deployment = Deployment("redis-leader", spec={
    "selector": {
        "match_labels": redis_leader_labels,
    },
    "replicas": 1,
    "template": {
        "metadata": {
            "labels": redis_leader_labels,
        },
        "spec": {
            "containers": [redis_leader_container],
        },
    },
})
# Redis leader: a single replica of the kubernetes e2e redis image with
# small resource requests, exposing the standard redis port.
redis_leader_deployment = Deployment("redis-leader", spec={
    "selector": {
        "match_labels": redis_leader_labels,
    },
    "replicas": 1,
    "template": {
        "metadata": {
            "labels": redis_leader_labels,
        },
        "spec": {
            "containers": [{
                "name": "master",
                "image": "k8s.gcr.io/redis:e2e",
                "resources": {
                    "requests": {
                        "cpu": "100m",
                        "memory": "100Mi",
                    },
                },
                "ports": [{
                    "container_port": 6379,
                }],
            }],
        },
    },
})
region="nyc3", version="latest", node_pool=do.KubernetesClusterNodePoolArgs( name="default", size="s-2vcpu-2gb", node_count=node_count )) k8s_provider = Provider("do-k8s", kubeconfig=cluster.kube_configs.apply(lambda c: c[0].raw_config)) app_labels = { "app": "app-nginx" } app = Deployment( "do-app-dep", spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=app_labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=app_labels), spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]), ), ), opts=ResourceOptions(provider=k8s_provider)) ingress = Service( 'do-app-svc', spec=ServiceSpecArgs( type='LoadBalancer', selector=app_labels, ports=[ServicePortArgs(port=80)], ), opts=ResourceOptions(provider=k8s_provider, custom_timeouts=CustomTimeouts(create="15m", delete="15m"))) ingress_ip = ingress.status.apply(lambda s: s.load_balancer.ingress[0].ip)
# Make a Kubernetes provider instance that uses our cluster from above. k8s_provider = Provider("gke_k8s", kubeconfig=k8s_config) # Create a canary deployment to test that this cluster works. labels = {"app": "canary-{0}-{1}".format(get_project(), get_stack())} canary = Deployment( "canary", spec={ "selector": { "matchLabels": labels }, "replicas": 1, "template": { "metadata": { "labels": labels }, "spec": { "containers": [{ "name": "nginx", "image": "nginx" }] }, }, }, __opts__=ResourceOptions(provider=k8s_provider), ) ingress = Service( "ingress", spec={ "type": "LoadBalancer",
config = pulumi.Config()
nginxLabels = {"app": "nginx"}

# IMPROVEMENT: read the optional replica count once; the config lookup was
# previously performed twice in the same expression.
replicas = config.get_int("replicas")

# nginx Deployment, defaulting to 2 replicas when none is configured
# (a configured value of 0 is respected).
nginxDeployment = Deployment("nginx-deployment", spec={
    "selector": {
        "matchLabels": nginxLabels
    },
    "replicas": 2 if replicas is None else replicas,
    "template": {
        "metadata": {
            "labels": nginxLabels
        },
        "spec": {
            "containers": [{
                "name": "nginx",
                "image": "nginx:1.7.9",
                "ports": [{
                    "containerPort": 80
                }],
            }],
        },
    },
})

# Export the auto-generated deployment name as a stack output.
pulumi.export("nginx", nginxDeployment.metadata["name"])