def __init__(self,
             name: str,
             image: str,
             resources: ResourceRequirementsArgs = None,
             replicas: int = None,
             ports: Sequence[int] = None,
             allocate_ip_address: bool = None,
             is_minikube: bool = None,
             opts: ResourceOptions = None):
    """Create a Kubernetes Deployment plus a Service that exposes it.

    :param name: base name used for the Deployment, the Service, and the
        ``app`` selector label.
    :param image: container image to run.
    :param resources: container resource requirements; defaults to a small
        request of 100m CPU / 100Mi memory.
    :param replicas: desired replica count; defaults to 1 when ``None``.
    :param ports: container ports, also used as the Service's ports.
    :param allocate_ip_address: when truthy, give the Service an externally
        reachable type and surface the address on ``self.ip_address``.
    :param is_minikube: when truthy, use ``ClusterIP`` instead of
        ``LoadBalancer`` (minikube has no load-balancer implementation).
    :param opts: standard Pulumi resource options for this component.
    """
    super().__init__('k8sx:component:ServiceDeployment', name, {}, opts)
    labels = {"app": name}
    container = ContainerArgs(
        name=name,
        image=image,
        resources=resources or ResourceRequirementsArgs(
            requests={
                "cpu": "100m",
                "memory": "100Mi",
            },
        ),
        ports=[ContainerPortArgs(container_port=p) for p in ports] if ports else None,
    )
    self.deployment = Deployment(
        name,
        spec=DeploymentSpecArgs(
            selector=LabelSelectorArgs(match_labels=labels),
            replicas=replicas if replicas is not None else 1,
            template=PodTemplateSpecArgs(
                metadata=ObjectMetaArgs(labels=labels),
                spec=PodSpecArgs(containers=[container]),
            ),
        ),
        opts=pulumi.ResourceOptions(parent=self))
    self.service = Service(
        name,
        metadata=ObjectMetaArgs(
            name=name,
            # Reuse the Deployment's labels so the two resources agree.
            labels=self.deployment.metadata.apply(lambda m: m.labels),
        ),
        spec=ServiceSpecArgs(
            ports=[ServicePortArgs(port=p, target_port=p) for p in ports] if ports else None,
            selector=self.deployment.spec.apply(lambda s: s.template.metadata.labels),
            type=("ClusterIP" if is_minikube else "LoadBalancer") if allocate_ip_address else None,
        ),
        opts=pulumi.ResourceOptions(parent=self))
    if allocate_ip_address:
        if is_minikube:
            self.ip_address = self.service.spec.apply(lambda s: s.cluster_ip)
        else:
            ingress = self.service.status.apply(lambda s: s.load_balancer.ingress[0])
            # BUG FIX: the lambda must use its parameter `i` (the resolved
            # ingress entry). The original closed over the Output `ingress`,
            # so `or` was applied to Output objects instead of plain values.
            self.ip_address = ingress.apply(lambda i: i.ip or i.hostname or "")
    self.register_outputs({})
app = Deployment( "do-app-dep", spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=app_labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=app_labels), spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]), ), ), opts=ResourceOptions(provider=k8s_provider)) ingress = Service( 'do-app-svc', spec=ServiceSpecArgs( type='LoadBalancer', selector=app_labels, ports=[ServicePortArgs(port=80)], ), opts=ResourceOptions(provider=k8s_provider, custom_timeouts=CustomTimeouts(create="15m", delete="15m"))) ingress_ip = ingress.status.apply(lambda s: s.load_balancer.ingress[0].ip) export('ingress_ip', ingress_ip) if domain_name: domain = do.Domain( "do-domain", name=domain_name, ip_address=ingress_ip) cname_record = do.DnsRecord( "do-domain-name",
resources=ResourceRequirementsArgs(requests={ "cpu": "100m", "memory": "100Mi", }, ), ports=[ContainerPortArgs(container_port=6379, )], ) ], ), ), )) redis_leader_service = Service( "redis-leader", metadata=ObjectMetaArgs(name="redis-leader", labels=redis_leader_labels), spec=ServiceSpecArgs( ports=[ServicePortArgs( port=6379, target_port=6379, )], selector=redis_leader_labels)) redis_replica_labels = { "app": "redis-replica", } redis_replica_deployment = Deployment( "redis-replica", spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=redis_replica_labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=redis_replica_labels, ), spec=PodSpecArgs(
value=mongodb_username), EnvVarArgs(name='mongodb_password', value=mongodb_password), EnvVarArgs(name='mongodb_database', value=mongodb_database) ]) ], image_pull_secrets=[ LocalObjectReferenceArgs(name=image_pull_secret) ])))) demo_service = Service("demo", metadata=ObjectMetaArgs(name="demo", namespace=namespace_name), spec=ServiceSpecArgs( selector={"app": "demo"}, ports=[ServicePortArgs(port=int(demo_port))], type="ClusterIP")) ingress = Ingress( "ingress", metadata=ObjectMetaArgs(namespace=namespace_name, annotations={ "kubernetes.io/ingress.class": "nginx", "nginx.ingress.kubernetes.io/ssl-redirect": "false", "nginx.ingress.kubernetes.io/use-regex": "true", "nginx.ingress.kubernetes.io/rewrite-target": "/$1" }), spec=IngressSpecArgs(rules=[
# Kubernetes provider bound to the GKE cluster's kubeconfig from above.
k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config)

# Create a canary deployment to test that this cluster works.
labels = {'app': 'canary-{0}-{1}'.format(get_project(), get_stack())}

canary_template = PodTemplateSpecArgs(
    metadata=ObjectMetaArgs(labels=labels),
    spec=PodSpecArgs(
        containers=[ContainerArgs(name='nginx', image='nginx')]),
)
canary = Deployment(
    'canary',
    spec=DeploymentSpecArgs(
        selector=LabelSelectorArgs(match_labels=labels),
        replicas=1,
        template=canary_template,
    ),
    opts=ResourceOptions(provider=k8s_provider))

# A load-balanced Service fronting the canary pods on port 80.
ingress_spec = ServiceSpecArgs(
    type='LoadBalancer',
    selector=labels,
    ports=[ServicePortArgs(port=80)],
)
ingress = Service('ingress',
                  spec=ingress_spec,
                  opts=ResourceOptions(provider=k8s_provider))

# Finally, export the kubeconfig so that the client can easily access the cluster.
export('kubeconfig', k8s_config)

# Export the k8s ingress IP to access the canary deployment
export('ingress_ip',
       ingress.status.apply(lambda st: st.load_balancer.ingress[0].ip))
resources=ResourceRequirementsArgs(requests={ "cpu": "100m", "memory": "100Mi", }, ), ports=[ContainerPortArgs(container_port=6379, )], ) ], ), ), )) redis_leader_service = Service( "redis-leader", metadata=ObjectMetaArgs(namespace=namespace, labels=redis_leader_labels), spec=ServiceSpecArgs( ports=[ServicePortArgs( port=6379, target_port=6379, )], selector=redis_leader_labels)) redis_follower_labels = {"app": "redis", "tier": "backend", "role": "slave"} redis_follower_deployment = Deployment( "redis-follower", metadata=ObjectMetaArgs(namespace=namespace), spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=redis_follower_labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=redis_follower_labels, ), spec=PodSpecArgs( containers=[
def __init__(self, name, credentials, resources, image=None, opts=None):
    """Stand up a complete Jenkins instance on Kubernetes.

    Creates four child resources, all parented to this component: a Secret
    holding the root password, a PersistentVolumeClaim for Jenkins state, a
    Deployment (built via ``create_deployment_args``), and a LoadBalancer
    Service exposing HTTP and HTTPS.

    :param name: base name; children are suffixed ("-secret", "-pvc", ...).
    :param credentials: mapping that must contain a "password" entry.
    :param resources: resource requirements passed through to the Deployment.
    :param image: optional Jenkins container image override.
    :param opts: standard Pulumi resource options.
    """
    super().__init__(
        "jenkins:jenkins:Instance",
        name,
        {"credentials": credentials, "resources": resources, "image": image},
        opts,
    )

    # The Secret will contain the root password for this instance,
    # base64-encoded as Kubernetes Secret data requires.
    encoded_password = base64.b64encode(
        credentials["password"].encode("utf-8")).decode("utf-8")
    root_secret = Secret(
        name + "-secret",
        metadata=ObjectMetaArgs(name=name),
        type="Opaque",
        data={"jenkins-password": encoded_password},
        opts=ResourceOptions(parent=self),
    )

    # The PVC provides persistent storage for Jenkins state.
    storage_claim = PersistentVolumeClaim(
        name + "-pvc",
        metadata=ObjectMetaArgs(name=name),
        spec=PersistentVolumeClaimSpecArgs(
            access_modes=["ReadWriteOnce"],
            resources=ResourceRequirementsArgs(requests={"storage": "8Gi"}),
        ),
        opts=ResourceOptions(parent=self),
    )

    # The Deployment describes the desired state for our Jenkins setup.
    deployment_args = create_deployment_args(name, credentials, resources, image)
    jenkins_deployment = Deployment(
        name + "-deploy",
        metadata=deployment_args.metadata,
        spec=deployment_args.spec,
        opts=ResourceOptions(parent=self),
    )

    # The Service exposes Jenkins to the external internet by providing
    # load-balanced ingress for HTTP and HTTPS (named container ports).
    http_port = ServicePortArgs(name="http", port=80, target_port="http")
    https_port = ServicePortArgs(name="https", port=443, target_port="https")
    jenkins_service = Service(
        name + "-service",
        metadata=ObjectMetaArgs(name=name),
        spec=ServiceSpecArgs(
            type="LoadBalancer",
            ports=[http_port, https_port],
            selector={"app": name},
        ),
        opts=ResourceOptions(parent=self),
    )

    # Surface the externally reachable address once the LB is provisioned;
    # cloud providers report either an IP or a hostname.
    lb_entry = jenkins_service.status.apply(lambda s: s.load_balancer.ingress[0])
    self.external_ip = lb_entry.apply(lambda entry: entry.ip or entry.hostname)
    self.register_outputs({"external_ip": self.external_ip})
""".format(info[2]['clusterCaCertificate'], info[1], '{0}_{1}_{2}'.format(project, zone, info[0]))) # Make a Kubernetes provider instance that uses our cluster from above. k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config) # Create a canary deployment to test that this cluster works. labels = { 'app': 'canary-{0}-{1}'.format(get_project(), get_stack()) } canary = Deployment('canary', spec=DeploymentSpecArgs( selector=LabelSelectorArgs(match_labels=labels), replicas=1, template=PodTemplateSpecArgs( metadata=ObjectMetaArgs(labels=labels), spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]), ), ), __opts__=ResourceOptions(provider=k8s_provider) ) ingress = Service('ingress', spec=ServiceSpecArgs( type='LoadBalancer', selector=labels, ports=[ServicePortArgs(port=80)], ), __opts__=ResourceOptions(provider=k8s_provider) ) # Finally, export the kubeconfig so that the client can easily access the cluster. export('kubeconfig', k8s_config) # Export the k8s ingress IP to access the canary deployment export('ingress_ip', ingress.status.apply(lambda status: status.load_balancer.ingress[0].ip))