def create_deployment(project_name: str, images: list, ports: dict = None, envs: dict = None):
    """Create a single-replica Deployment with one container per image.

    :param project_name: project name; its sha256 hash (first 16 hex chars)
        names the deployment and labels/selects its pods.
    :param images: list of "name:tag" image strings.
    :param ports: optional {image: [port, ...]} mapping of container ports.
    :param envs: optional {image: [{"name": ..., "value": ...}, ...]} mapping.
    :return: the API response from ``create_namespaced_deployment``.
    """
    # Mutable-default fix: {} defaults are shared across calls.
    ports = ports or {}
    envs = envs or {}
    containers = []
    for image in images:
        # "name:tag"; an image without an explicit tag defaults to "latest"
        # (the original split(":")[1] raised IndexError in that case).
        name, _, tag = image.partition(":")
        tag = tag or "latest"
        container_ports = ([client.V1ContainerPort(p) for p in ports[image]]
                           if ports.get(image) else None)
        container_envs = ([client.V1EnvVar(name=e.get("name"), value=e.get("value"))
                           for e in envs.get(image)]
                          if envs.get(image) else None)
        # Only attach a TCP readiness probe when a port is known; the
        # original read container_ports[0] unconditionally and crashed with
        # TypeError whenever no port was configured for the image.
        readiness_probe = None
        if container_ports:
            readiness_probe = client.V1Probe(
                initial_delay_seconds=10,
                period_seconds=15,
                tcp_socket=client.V1TCPSocketAction(
                    port=container_ports[0].container_port))
        containers.append(
            client.V1Container(
                name=name,
                image=REGISTRY_PREFIX + image,
                # Restored: was commented out, so declared ports never
                # reached the container spec despite the probe using them.
                ports=container_ports,
                env=container_envs,
                readiness_probe=readiness_probe,
                # ":latest" must always be re-pulled to pick up new pushes.
                image_pull_policy="Always" if tag == "latest" else "IfNotPresent"))
    project_hash = hashlib.sha256(project_name.encode()).hexdigest()[:16]
    labels = {"identifier": project_hash}
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=containers))
    spec = client.V1DeploymentSpec(
        replicas=1,  # default
        template=template,
        selector={'matchLabels': labels},
    )
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=project_hash + "-deployment"),
        spec=spec)
    v1 = client.AppsV1Api()
    resp = v1.create_namespaced_deployment(body=deployment, namespace="default")
    return resp
def _create_deployment(self):
    """Create a single-replica OpenVAS Deployment in ``self.namespace``.

    :return: the V1Deployment object returned by the API server.
    """
    REPLICAS = 1
    container_port = k8s.V1ContainerPort(
        # Kubernetes port names are capped at 15 characters, hence the slice.
        name=self.uid[-14:],
        # BUG FIX: os.getenv returns a *string* whenever the variable is set
        # (the int 9390 is only used when it is unset); cast so
        # container_port is always an int as the API model expects.
        container_port=int(os.getenv("OPENVAS_OMP_PORT", 9390)))
    resources = k8s.V1ResourceRequirements(
        limits={
            "cpu": KubernetesDeployer.CONTAINER_USE_CPU_LIMIT,
            "memory": KubernetesDeployer.CONTAINER_USE_MEMORY_LIMIT,
        })
    # Exec-based readiness with a long (300s) initial delay — presumably to
    # cover the scanner's slow startup; confirm against the image docs.
    readiness_probe = k8s.V1Probe(
        _exec=k8s.V1ExecAction(
            command=KubernetesDeployer.OPENVAS_HEALTHCHECK_COMMAND),
        initial_delay_seconds=300,
        period_seconds=30,
    )
    liveness_probe = k8s.V1Probe(
        tcp_socket=k8s.V1TCPSocketAction(
            port=container_port.container_port),
        initial_delay_seconds=180,
        period_seconds=30,
        failure_threshold=3,
        timeout_seconds=5,
    )
    container = k8s.V1Container(
        image=KubernetesDeployer.OPENVAS_CONTAINER_IMAGE,
        name=self.uid,
        image_pull_policy="IfNotPresent",
        ports=[container_port],
        resources=resources,
        readiness_probe=readiness_probe,
        liveness_probe=liveness_probe,
    )
    # Tolerate the "Scanners" taint so pods can land on dedicated nodes.
    toleration = k8s.V1Toleration(effect="NoSchedule",
                                  key="Scanners",
                                  operator="Exists")
    pod_spec = k8s.V1PodSpec(containers=[container],
                             tolerations=[toleration])
    pod_metadata = k8s.V1ObjectMeta(
        name=self.uid,
        labels={"app.kubernetes.io/name": self.uid},
        annotations={
            # Keep the autoscaler from evicting a pod mid-scan.
            "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
        },
    )
    pod_template = k8s.V1PodTemplateSpec(spec=pod_spec,
                                         metadata=pod_metadata)
    selector = k8s.V1LabelSelector(
        match_labels={"app.kubernetes.io/name": self.uid})
    deployment_spec = k8s.V1DeploymentSpec(replicas=REPLICAS,
                                           selector=selector,
                                           template=pod_template)
    deployment_metadata = k8s.V1ObjectMeta(
        name=self.uid,
        labels={"app.kubernetes.io/name": self.uid})
    deployment = k8s.V1Deployment(spec=deployment_spec,
                                  metadata=deployment_metadata)
    return k8s.AppsV1Api(self.client).create_namespaced_deployment(
        self.namespace, deployment)
def _build_container(self, image: str, entrypoint: str) -> Container:
    """
    Build the primary Container for this HmlInferenceApp, using the
    Kubeflow Container wrapper so that we can utilise its helper methods.

    Returns:
        kfp.dsl._container_op.Container
    """
    # One HTTP probe on /healthz doubles as both liveness and readiness.
    health_probe = client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/healthz", port=self.port),
        initial_delay_seconds=60,
        period_seconds=60,
    )
    built = Container(
        name=f"{self.name}-container",
        image=image,
        command=[entrypoint],
        args=["inference", "start-prod"],
        ports=[client.V1ContainerPort(container_port=self.port)],
        liveness_probe=health_probe,
        readiness_probe=health_probe,
    )
    # The Kubeflow SDK removes the container name, so restore the swagger
    # metadata from the plain V1Container model to add it back.
    built.swagger_types = client.V1Container.swagger_types
    built.attribute_map = client.V1Container.attribute_map
    return built
def create_container_object(deployment_image: str, port: int, external_port: bool) -> V1Container:
    """
    Create the container object.

    :param deployment_image: The image name in the docker environment.
    :param port: port of the web_server.
    :param external_port: whether the port should be an external port.
    :return: The container object.
    """
    def _health_probe(delay_seconds):
        # Both probes poll GET /health; only the initial delay differs.
        return client.V1Probe(
            http_get=client.V1HTTPGetAction(port=port, path='/health'),
            initial_delay_seconds=delay_seconds,
            failure_threshold=2,
            period_seconds=1,
        )

    if external_port:
        # Externally reachable: bind the same port on the host.
        port_spec = client.V1ContainerPort(
            container_port=port,
            host_ip='0.0.0.0',
            host_port=port,
            name='prt',
            protocol='TCP'
        )
    else:
        port_spec = client.V1ContainerPort(container_port=port)

    return client.V1Container(
        name="molerelay",
        image=deployment_image,
        image_pull_policy="Never",
        ports=[port_spec],
        liveness_probe=_health_probe(3),
        readiness_probe=_health_probe(2)
    )
def update_deployment(project_name: str, images: list, ports: dict = None, envs: dict = None, namespace: str = "default"):
    """Patch an existing deployment so its containers match *images*.

    Images whose base name is new become "add" patches; images matching an
    existing container's name become "replace" patches (either an image/tag
    swap, or — for ``:latest`` — a readiness-delay toggle).

    :param project_name: project name; hashed to recover the deployment name.
    :param images: desired list of "name:tag" image strings.
    :param ports: unused; kept for interface compatibility.
    :param envs: unused; kept for interface compatibility.
    :param namespace: namespace holding the deployment.
    :return: the API response from ``patch_namespaced_deployment``.
    """
    # Mutable-default fix: {} defaults are shared across calls.
    ports = ports or {}
    envs = envs or {}
    project_hash = hashlib.sha256(project_name.encode()).hexdigest()[:16]
    deployment_name = f"{project_hash}-deployment"
    prev_deployment = describe_deployment(name=deployment_name)
    prev_containers = prev_deployment.spec.template.spec.containers
    prev_images = [c.image.replace(REGISTRY_PREFIX, "") for c in prev_containers]
    prev_image_names = [p.split(":")[0] for p in prev_images]
    add_patch, replace_patch = [], []
    for image in images:
        # Unknown container name -> add; otherwise pair with the index of
        # the existing container carrying the same name.
        if image.split(":")[0] not in prev_image_names:
            add_patch.append(image)
            continue
        for i, prev_image in enumerate(prev_images):
            if prev_image.split(":")[0] == image.split(":")[0]:
                replace_patch.append((i, image))
    v1 = client.AppsV1Api()
    for add_p in add_patch:
        new_container = client.V1Container(
            name=add_p.split(":")[0],
            image=REGISTRY_PREFIX + add_p,
            readiness_probe=client.V1Probe(
                initial_delay_seconds=10,
                period_seconds=20,
                # BUG FIX: V1TCPSocketAction takes the port number itself;
                # the original passed a V1ContainerPort model object.
                tcp_socket=client.V1TCPSocketAction(port=8080)))
        # NOTE(review): this append was already commented out upstream, so
        # "add" patches are built but never applied — confirm intent before
        # enabling it.
        # prev_deployment.spec.template.spec.containers.append(new_container)
    for idx, new_image in replace_patch:
        target = prev_deployment.spec.template.spec.containers[idx]
        if new_image.split(':')[1] == 'latest':
            # Re-applying the same ":latest" tag would be a no-op patch;
            # toggling the probe delay between 10 and 15 changes the spec so
            # the pod restarts and re-pulls — presumably; confirm intent.
            prev_is = target.readiness_probe.initial_delay_seconds
            target.readiness_probe.initial_delay_seconds = 10 if prev_is > 10 else 15
        else:
            target.image = REGISTRY_PREFIX + new_image
    resp = v1.patch_namespaced_deployment(name=deployment_name,
                                          namespace=namespace,
                                          body=prev_deployment)
    return resp
def create_pod(self, params):
    """Create a pod in the requested namespace from the *params* dict.

    Recognised keys (all optional, with fallbacks): vnf_name, namespace,
    default_ip, network_ips, image, command, env, node_selector, is_vnc.
    Raises ApiException if the API call fails.
    """
    vnf_name = params.get('vnf_name') or "testpythonclient"
    namespace = params.get('namespace') or 'vicsnet'
    default_ip = params.get('default_ip') or None
    network_ips = params.get('network_ips') or []
    image = params.get('image') or "192.168.103.250:5000/icn-dtn-base-0.6.5:1.0"
    command = params.get('command') or ["/bin/bash", "-c", "/root/start_vicsnf.sh; sleep 30d;"]
    env_vars = [self.client.V1EnvVar(e.get("name"), e.get("value"))
                for e in (params.get('env') or [])]
    node_selector = params.get('node_selector') or None
    is_vnc = params.get('is_vnc') or False

    # Create a body which stores the information of the pod to create
    body = self.client.V1Pod()

    # Pod metadata: optional IP annotations consumed by the CNI plugin.
    annotations = {}
    if default_ip:
        annotations['default_ip'] = default_ip
    if network_ips:
        annotations['k8s.v1.cni.cncf.io/networks'] = str(network_ips)
    body.metadata = client.V1ObjectMeta(namespace=namespace,
                                        name=vnf_name,
                                        annotations=annotations)

    # VNC pods get an HTTP readiness probe and run privileged as root.
    readiness_probe = None
    security_context = None
    if is_vnc:
        http_get = client.V1HTTPGetAction(port=6901, path="/", scheme="HTTP")
        readiness_probe = client.V1Probe(http_get=http_get,
                                         initial_delay_seconds=1,
                                         timeout_seconds=1)
        security_context = client.V1SecurityContext(run_as_user=0,
                                                    privileged=True)

    container = self.client.V1Container(command=command,
                                        image=image,
                                        env=env_vars,
                                        name=vnf_name,
                                        working_dir='/root',
                                        security_context=security_context,
                                        readiness_probe=readiness_probe)
    if node_selector:
        node_selector = {"kubernetes.io/hostname": node_selector}
    body.spec = self.client.V1PodSpec(containers=[container],
                                      node_selector=node_selector)
    try:
        api_response = self.v1Api.create_namespaced_pod(namespace, body)
        print(api_response)
    except ApiException:
        raise
def _create_probe(hc, port):
    '''
    Create a Kubernetes probe based on info in the health check dictionary hc
    '''
    kind = hc['type']
    # Timing settings shared by every probe flavour.
    common = dict(
        failure_threshold=1,
        initial_delay_seconds=5,
        period_seconds=_parse_interval(hc.get('interval', PROBE_DEFAULT_PERIOD)),
        timeout_seconds=_parse_interval(hc.get('timeout', PROBE_DEFAULT_TIMEOUT)),
    )
    if kind in ('http', 'https'):
        return client.V1Probe(
            http_get=client.V1HTTPGetAction(path=hc['endpoint'],
                                            port=port,
                                            scheme=kind.upper()),
            **common)
    if kind in ('script', 'docker'):
        return client.V1Probe(
            _exec=client.V1ExecAction(command=hc['script'].split()),
            **common)
    # Unknown probe type: nothing to build.
    return None
def export_deployment(self):
    """Build (without submitting) the Deployment object for this service."""
    env_vars = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
    ]
    # Liveness is a plain TCP connect on the first declared port.
    tcp_check = client.V1TCPSocketAction(port=int(self.container_port[0]))
    container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[client.V1ContainerPort(container_port=int(p))
               for p in self.container_port],
        image_pull_policy='Always',
        env=env_vars,
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=[client.V1VolumeMount(mount_path='/opt/logs',
                                            name='logs')],
        liveness_probe=client.V1Probe(initial_delay_seconds=5,
                                      tcp_socket=tcp_check))
    # Pod spec: registry pull secret plus the shared host log directory.
    registry_secret = client.V1LocalObjectReference('registrysecret')
    log_volume = client.V1Volume(
        name='logs',
        host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(containers=[container],
                              image_pull_secrets=[registry_secret],
                              volumes=[log_volume]))
    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=pod_template,
        selector=client.V1LabelSelector(
            match_labels={"project": self.dm_name}),
        min_ready_seconds=3)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=deploy_spec)
def create_sts_spec(node_name, workload_type):
    """Build a StatefulSet pinned to *node_name* running a test workload.

    The set is created with replicas=0 so callers can scale it up when
    ready; it claims one 4Gi longhorn volume mounted at /mnt/<sts_name>.

    :param node_name: node to pin the pod to (``spec.node_name``).
    :param workload_type: passed to ``get_workload_command`` to pick the
        shell command the container runs.
    :return: a client.V1StatefulSet (not yet submitted to the API).
    """
    sts_name = STS_PREFIX + node_name
    cmd = get_workload_command(workload_type, sts_name)
    container = client.V1Container(
        name=sts_name,
        image=IMAGE,
        command=["/bin/bash"],
        args=["-c", cmd],
        # Liveness: the workload's mount point must remain listable.
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["ls", "/mnt/" + sts_name]),
            initial_delay_seconds=5,
            period_seconds=5),
        volume_mounts=[
            client.V1VolumeMount(name=sts_name,
                                 mount_path="/mnt/" + sts_name)
        ])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": sts_name}),
        spec=client.V1PodSpec(
            node_name=node_name,
            restart_policy="Always",
            termination_grace_period_seconds=10,
            containers=[container],
        ))
    spec = client.V1StatefulSetSpec(
        replicas=0,  # created scaled-down; scaled up by the caller
        service_name=sts_name,
        selector=client.V1LabelSelector(match_labels={"app": sts_name}),
        template=template,
        volume_claim_templates=[
            client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(name=sts_name),
                spec=client.V1PersistentVolumeClaimSpec(
                    access_modes=["ReadWriteOnce"],
                    storage_class_name="longhorn",
                    resources=client.V1ResourceRequirements(
                        requests={"storage": "4Gi"})))
        ])
    statefulset = client.V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=client.V1ObjectMeta(name=sts_name),
        spec=spec)
    # (A stray no-op expression `statefulset.spec.replicas` was removed.)
    return statefulset
def create_nlu_deployment_object(image_full_name, secret_name, tag_name, mem_allocation):
    """Build an extensions/v1beta1 Deployment for one NLU container.

    Memory request/limit come from *mem_allocation* (MiB); env vars come
    from the named secret, with a marker variable when none is given.
    """
    memory = str(int(mem_allocation)) + "Mi"
    mcpu = "80m"
    if secret_name is None:
        # No secret: inject a marker variable so the container still starts.
        env_list = [client.V1EnvVar(name="no-nlu-variables", value="true")]
    else:
        env_list = create_env_list(secret_name)
    requirements = client.V1ResourceRequirements(
        requests={"memory": memory, "cpu": mcpu},
        limits={"memory": memory})
    liveness_check = client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/healthcheck", port=5000),
        failure_threshold=3,
        period_seconds=3,
        initial_delay_seconds=7,
        timeout_seconds=2)
    nlu_container = client.V1Container(
        name=tag_name,
        image=image_full_name,
        image_pull_policy="IfNotPresent",
        liveness_probe=liveness_check,
        resources=requirements,
        ports=[client.V1ContainerPort(container_port=5000)],
        env=env_list)
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": tag_name}),
        spec=client.V1PodSpec(containers=[nlu_container]))
    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=1,
        template=pod_template)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=tag_name),
        spec=deploy_spec)
def load_liveness_readiness_probe(data):
    """Parse a YAML probe snippet into a ``client.V1Probe``.

    Supported keys: ``httpGet`` (``port`` required; ``path``/``host``
    optional), ``exec`` (with a ``command`` list), and the timing fields
    ``initialDelaySeconds``, ``periodSeconds``, ``timeoutSeconds``.

    :param data: YAML string describing the probe.
    :return: a populated client.V1Probe.
    """
    # BUG FIX: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary objects from untrusted input; safe_load builds
    # plain Python types only.
    probe = yaml.safe_load(data)
    http_get = None
    if "httpGet" in probe and "port" in probe['httpGet']:
        http_get = client.V1HTTPGetAction(
            port=int(probe['httpGet']['port'])
        )
        # path/host only apply once the action exists (i.e. port is given);
        # nesting them here avoids touching a None action.
        if "path" in probe['httpGet']:
            http_get.path = probe['httpGet']['path']
        if "host" in probe['httpGet']:
            http_get.host = probe['httpGet']['host']
    exec_action = None
    if "exec" in probe:
        if probe['exec']['command']:
            exec_action = client.V1ExecAction(
                command=probe['exec']['command']
            )
    v1_probe = client.V1Probe()
    if http_get:
        v1_probe.http_get = http_get
    if exec_action:
        # The generated model names this attribute `_exec` ("exec" is a
        # reserved word in older Python).
        v1_probe._exec = exec_action
    if "initialDelaySeconds" in probe:
        v1_probe.initial_delay_seconds = probe["initialDelaySeconds"]
    if "periodSeconds" in probe:
        v1_probe.period_seconds = probe["periodSeconds"]
    if "timeoutSeconds" in probe:
        v1_probe.timeout_seconds = probe["timeoutSeconds"]
    return v1_probe
def _build_deployment(self) -> ExtensionsV1beta1Deployment:
    """Build (without creating) a single-replica nginx Deployment.

    :return: ExtensionsV1beta1Deployment whose one container is probed with
        GET /healthz on ``self.port`` for both liveness and readiness.
    """
    # Define our probes for when the container is ready for action
    probe_action = client.V1HTTPGetAction(path="/healthz", port=self.port)
    # BUG FIX: the python client's keyword is snake_case ``http_get``
    # (see the other probe builders in this file); ``httpGet`` raised
    # TypeError: unexpected keyword argument.
    probe = client.V1Probe(http_get=probe_action,
                           initial_delay_seconds=60,
                           period_seconds=60)
    container = client.V1Container(
        name=self.name,
        image="nginx:1.7.9",
        ports=[client.V1ContainerPort(container_port=self.port)],
        liveness_probe=probe,
        readiness_probe=probe,
    )
    # Create and configurate a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": self.name}),
        spec=client.V1PodSpec(containers=[container]),
    )
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1,
                                                  template=template)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.name,
                                     namespace=self.namespace),
        spec=spec,
    )
    return deployment
def generate_pod():
    # Fixture factory: returns a fully populated V1Pod mirroring a real
    # production pod (owner references, probes, tolerations, downward
    # volumes) — presumably for tests; confirm against the callers.
    metadata = client.V1ObjectMeta(
        name="platform-app-958795556-2nqgj",
        namespace="production",
        generate_name="platform-app-958795556-",
        labels={
            "app": "platform",
            "chart": "platform",
            "component": "app",
            "heritage": "Helm",
            "pod-template-hash": "958795556",
            "release": "platform-production",
            "version": "1.0.3",
        },
        # Owned by its ReplicaSet, as a controller-managed pod would be.
        owner_references=[
            client.V1OwnerReference(
                api_version="apps/v1",
                kind="ReplicaSet",
                name="platform-app-958795556",
                uid="35ba938b-681d-11eb-a74a-16e1a04d726b",
                controller=True,
                block_owner_deletion=True,
            )
        ],
    )
    container = client.V1Container(
        name="app",
        image="platform.azurecr.io/app:master",
        image_pull_policy="Always",
        termination_message_policy="File",
        termination_message_path="/dev/termination-log",
        env=[],
        resources=client.V1ResourceRequirements(
            limits={
                "cpu": "1200m",
                "memory": "1Gi"
            },
            requests={
                "cpu": "1",
                "memory": "768Mi"
            },
        ),
        ports=[client.V1ContainerPort(container_port=3000, protocol="TCP")],
        volume_mounts=[
            client.V1VolumeMount(
                name="default-token-2cg25",
                read_only=True,
                mount_path="/var/run/secrets/kubernetes.io/serviceaccount",
            )
        ],
        liveness_probe=client.V1Probe(
            initial_delay_seconds=10,
            timeout_seconds=5,
            period_seconds=10,
            success_threshold=1,
            failure_threshold=6,
            http_get=client.V1HTTPGetAction(path="/health/liveness",
                                            port=3000,
                                            scheme="HTTP"),
        ),
        readiness_probe=client.V1Probe(
            initial_delay_seconds=10,
            timeout_seconds=5,
            period_seconds=10,
            success_threshold=2,
            failure_threshold=6,
            # NOTE(review): "/health/readness" looks like a typo for
            # "readiness", but it is runtime data reproduced verbatim —
            # confirm against the live manifest before changing.
            http_get=client.V1HTTPGetAction(path="/health/readness",
                                            port=3000,
                                            scheme="HTTP"),
        ),
    )
    spec = client.V1PodSpec(
        containers=[container],
        volumes=[
            client.V1Volume(
                name="default-token-2cg25",
                secret=client.V1SecretVolumeSource(
                    secret_name="default-token-2cg25", default_mode=420),
            )
        ],
        restart_policy="Always",
        termination_grace_period_seconds=30,
        dns_policy="ClusterFirst",
        service_account_name="default",
        service_account="default",
        node_name="aks-agentpool-26722002-vmss00039t",
        security_context=client.V1PodSecurityContext(run_as_user=1000,
                                                     fs_group=1000),
        scheduler_name="default-scheduler",
        # Standard not-ready/unreachable tolerations with 300s grace.
        tolerations=[
            client.V1Toleration(
                key="node.kubernetes.io/not-ready",
                operator="Exists",
                effect="NoExecute",
                toleration_seconds=300,
            ),
            client.V1Toleration(
                key="node.kubernetes.io/unreachable",
                operator="Exists",
                effect="NoExecute",
                toleration_seconds=300,
            ),
        ],
        priority=0,
        enable_service_links=True,
    )
    return client.V1Pod(metadata=metadata, spec=spec)
def export_deployment(self):
    """Build (without submitting) the Deployment object for this service.

    Combines host-path volumes (logs plus self.mounts), TCP or HTTP
    probes derived from self.container_port/self.healthcheck, an optional
    sidecar container, host aliases loaded from the docker_hosts DB table
    (staged through Redis), and node-affinity rules driven by self.labels.
    """
    # Configureate Pod template container
    volume_mounts = []
    containers = []
    volumes = []
    ports = []
    liveness_probe = None
    readiness_probe = None
    # Every pod gets the shared host log directory.
    volume_mounts.append(
        client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
    volumes.append(
        client.V1Volume(name='logs',
                        host_path=client.V1HostPathVolumeSource(
                            path='/opt/logs', type='DirectoryOrCreate')))
    if self.mounts:
        # self.mounts maps host path -> volume name; each entry becomes a
        # hostPath volume mounted at the same path in the container.
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(name=self.mounts[path],
                                host_path=client.V1HostPathVolumeSource(
                                    path=path, type='DirectoryOrCreate')))
    if self.container_port:
        ports = [
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ]
        # Default probes: TCP connect on the first declared port.
        liveness_probe = client.V1Probe(
            initial_delay_seconds=15,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0])))
        readiness_probe = client.V1Probe(
            initial_delay_seconds=15,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0])))
        # An HTTP healthcheck path, when configured, overrides both probes.
        if self.healthcheck:
            liveness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(
                    path=self.healthcheck,
                    port=int(self.container_port[0])))
            readiness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(
                    path=self.healthcheck,
                    port=int(self.container_port[0])))
    # Locale env plus pod identity via the downward API.
    Env = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]
    # Built once without probes, then rebuilt with them when both exist.
    container = client.V1Container(name=self.dm_name,
                                   image=self.image,
                                   ports=ports,
                                   image_pull_policy='Always',
                                   env=Env,
                                   resources=client.V1ResourceRequirements(
                                       limits=self.re_limits,
                                       requests=self.re_requests),
                                   volume_mounts=volume_mounts)
    if liveness_probe and readiness_probe:
        container = client.V1Container(
            name=self.dm_name,
            image=self.image,
            ports=ports,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts,
            liveness_probe=liveness_probe,
            readiness_probe=readiness_probe)
    containers.append(container)
    if self.sidecar:
        # Optional sidecar shares env, resources and mounts with the main
        # container.
        sidecar_container = client.V1Container(
            name='sidecar-%s' % self.dm_name,
            image=self.sidecar,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts)
        containers.append(sidecar_container)
    # Create and configurate a spec section
    secrets = client.V1LocalObjectReference('registrysecret')
    preference_key = self.dm_name
    project_values = ['xxxx']
    host_aliases = []
    # Load extra /etc/hosts entries for this deployment from the DB.
    db_docker_hosts = db_op.docker_hosts
    values = db_docker_hosts.query.with_entities(
        db_docker_hosts.ip, db_docker_hosts.hostname).filter(
            and_(db_docker_hosts.deployment == self.dm_name,
                 db_docker_hosts.context == self.context)).all()
    db_op.DB.session.remove()
    if values:
        ips = []
        # Stage hostnames per IP in a Redis list, then drain each list into
        # one V1HostAlias per IP; errors are logged and skipped.
        for value in values:
            try:
                ip, hostname = value
                key = "op_docker_hosts_%s" % ip
                Redis.lpush(key, hostname)
                ips.append(ip)
            except Exception as e:
                logging.error(e)
        for ip in set(ips):
            try:
                key = "op_docker_hosts_%s" % ip
                if Redis.exists(key):
                    hostnames = Redis.lrange(key, 0, -1)
                    if hostnames:
                        host_aliases.append(
                            client.V1HostAlias(hostnames=hostnames,
                                               ip=ip))
                    Redis.delete(key)
            except Exception as e:
                logging.error(e)
    # self.labels may override the preferred-affinity key and the required
    # project values.
    if self.labels:
        if 'deploy' in self.labels:
            preference_key = self.labels['deploy']
        if 'project' in self.labels:
            project_values = [self.labels['project']]
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[secrets],
            volumes=volumes,
            host_aliases=host_aliases,
            # Prefer nodes labelled <preference_key> in ('mark'); require
            # nodes labelled project in project_values.
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key=preference_key,
                                    operator='In',
                                    values=['mark'])
                            ]),
                        weight=100)
                ],
                required_during_scheduling_ignored_during_execution=client.
                V1NodeSelector(node_selector_terms=[
                    client.V1NodeSelectorTerm(match_expressions=[
                        client.V1NodeSelectorRequirement(
                            key='project',
                            operator='In',
                            values=project_values)
                    ])
                ])))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
        self.replicas),
                                                  template=template,
                                                  selector=selector,
                                                  min_ready_seconds=3)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
    return deployment
def createStatefulSet(cls, cluster_object: V1MongoClusterConfiguration) -> client.V1beta1StatefulSet:
    """
    Creates the stateful set configuration for the given cluster.
    :param cluster_object: The cluster object from the YAML file.
    :return: The stateful set object.
    """
    # Parse cluster data object.
    name = cluster_object.metadata.name
    namespace = cluster_object.metadata.namespace
    replicas = cluster_object.spec.mongodb.replicas
    storage_mount_path = cluster_object.spec.mongodb.host_path or cls.DEFAULT_STORAGE_MOUNT_PATH
    # NOTE(review): host_path gets no default, so it may be None while
    # storage_mount_path falls back to the default; the hostPath volume
    # below would then be built with path=None — confirm intent.
    host_path = cluster_object.spec.mongodb.host_path
    cpu_limit = cluster_object.spec.mongodb.cpu_limit or cls.DEFAULT_CPU_LIMIT
    memory_limit = cluster_object.spec.mongodb.memory_limit or cls.DEFAULT_MEMORY_LIMIT
    run_as_user = cluster_object.spec.mongodb.run_as_user or cls.DEFAULT_RUN_AS_USER
    service_account = cluster_object.spec.mongodb.service_account or cls.DEFAULT_SERVICE_ACCOUNT
    wired_tiger_cache_size = cluster_object.spec.mongodb.wired_tiger_cache_size or cls.DEFAULT_CACHE_SIZE
    secret_name = cls.ADMIN_SECRET_NAME_FORMAT.format(name)

    # create container
    # Credentials/identity come from the admin secret; POD_IP from the
    # downward API.
    mongo_container = client.V1Container(
        name=name,
        env=[client.V1EnvVar(
            name="POD_IP",
            value_from=client.V1EnvVarSource(
                field_ref=client.V1ObjectFieldSelector(
                    api_version="v1",
                    field_path="status.podIP"
                )
            )
        ),
        client.V1EnvVar(
            name="MONGODB_PASSWORD",
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key="database-password",
                    name=secret_name
                )
            )
        ),
        client.V1EnvVar(
            name="MONGODB_USER",
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key="database-user",
                    name=secret_name
                )
            )
        ),
        client.V1EnvVar(
            name="MONGODB_DATABASE",
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key="database-name",
                    name=secret_name
                )
            )
        ),
        client.V1EnvVar(
            name="MONGODB_ADMIN_PASSWORD",
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key="database-admin-password",
                    name=secret_name
                )
            )
        ),
        client.V1EnvVar(
            name="WIREDTIGER_CACHE_SIZE",
            value=wired_tiger_cache_size
        ),
        client.V1EnvVar(
            name="MONGODB_REPLICA_NAME",
            value=name
        ),
        client.V1EnvVar(
            name="MONGODB_SERVICE_NAME",
            value="svc-" + name + "-internal"
        ),
        client.V1EnvVar(
            name="MONGODB_KEYFILE_VALUE",
            # NOTE(review): hard-coded replica-set keyfile value; this is a
            # secret and should come from a Secret object — flagging, left
            # verbatim here.
            value="supersecretkeyfile123"
        )],
        # Liveness: plain TCP connect on the mongod port.
        liveness_probe=client.V1Probe(failure_threshold=3,
                                      initial_delay_seconds=30,
                                      period_seconds=30,
                                      success_threshold=1,
                                      tcp_socket=client.V1TCPSocketAction(port=cls.MONGO_PORT),
                                      timeout_seconds=1
        ),
        command=cls.MONGO_COMMAND.split(),
        image=cls.MONGO_IMAGE,
        image_pull_policy="Always",
        ports=[client.V1ContainerPort(
            name="mongodb",
            container_port=cls.MONGO_PORT,
            protocol="TCP"
        )],
        # Readiness: an authenticated mongo shell round-trip must succeed.
        readiness_probe=client.V1Probe(_exec=client.V1ExecAction(command=["/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]),
                                       failure_threshold=3,
                                       initial_delay_seconds=10,
                                       period_seconds=10,
                                       success_threshold=1,
                                       timeout_seconds=1
        ),
        security_context=client.V1SecurityContext(
            run_as_user=int(run_as_user),
            se_linux_options=client.V1SELinuxOptions(
                level="s0",
                type="spc_t"
            )
        ),
        termination_message_path="/dev/termination-log",
        volume_mounts=[client.V1VolumeMount(
            name="mongo-data",
            read_only=False,
            mount_path=storage_mount_path
        )],
        resources=client.V1ResourceRequirements(
            limits={"cpu": cpu_limit, "memory": memory_limit},
            requests={"cpu": cpu_limit, "memory": memory_limit}
        )
    )

    #create affinity rules
    # Anti-affinity: never co-locate two replicas of this cluster on the
    # same node (keyed on the "app" label / hostname topology).
    affinity = client.V1Affinity(
        pod_anti_affinity=client.V1PodAntiAffinity(
            required_during_scheduling_ignored_during_execution=[
                client.V1PodAffinityTerm(label_selector=client.V1LabelSelector(
                    match_expressions=[client.V1LabelSelectorRequirement(
                        key="app",
                        operator="In",
                        values=[name]
                    )]
                ),
                topology_key="kubernetes.io/hostname")
            ]
        )
    )

    volumes = [client.V1Volume(
        name="mongo-data",
        host_path=client.V1HostPathVolumeSource(path=host_path)
    )]

    # Create stateful set.
    return client.V1beta1StatefulSet(
        metadata=client.V1ObjectMeta(annotations={"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true"},
                                     name=name,
                                     namespace=namespace,
                                     labels=cls.createDefaultLabels(name)),
        spec=client.V1beta1StatefulSetSpec(
            replicas=replicas,
            service_name="svc-" + name + "-internal",
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels=cls.createDefaultLabels(name)),
                spec=client.V1PodSpec(affinity=affinity,
                                      containers=[mongo_container],
                                      node_selector={"compute": "mongodb"},
                                      service_account=service_account,
                                      #restart_policy="Never",
                                      volumes=volumes
                )
            ),
        ),
    )
def export_deployment(self):
    """Build (without submitting) the Deployment object for this service.

    TCP liveness/readiness probes target the first entry of
    self.container_port; the "launch" service gets an extra config mount.
    """
    # Configureate Pod template container
    volume_mounts = []
    volume_mounts.append(
        client.V1VolumeMount(mount_path='/opt/logs', name='logs'))
    if self.dm_name == 'launch':
        # The "launch" service additionally mounts its own conf directory.
        volume_mounts.append(
            client.V1VolumeMount(mount_path='/opt/%s/conf' % self.dm_name,
                                 name=self.dm_name))
    container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ],
        image_pull_policy='Always',
        env=[
            client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
            client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
        ],
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=volume_mounts,
        # Both probes: plain TCP connect on the first declared port.
        liveness_probe=client.V1Probe(
            initial_delay_seconds=30,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0]))),
        readiness_probe=client.V1Probe(
            initial_delay_seconds=30,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0]))))
    # Create and configurate a spec section
    secrets = client.V1LocalObjectReference('registrysecret')
    volumes = []
    volume = client.V1Volume(
        name='logs',
        host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
    volumes.append(volume)
    # Soft affinity: prefer project=moji nodes (weight 30) and nodes
    # labelled deploy=<this deployment> (weight 70).
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=[container],
            image_pull_secrets=[secrets],
            volumes=volumes,
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='project',
                                    operator='In',
                                    values=['moji'])
                            ]),
                        weight=30),
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='deploy',
                                    operator='In',
                                    values=[self.dm_name])
                            ]),
                        weight=70)
                ]))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
        self.replicas),
                                                  template=template,
                                                  selector=selector,
                                                  min_ready_seconds=3)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
    return deployment
def template(context):
    """Build an apps/v1 Deployment object for one service from *context*.

    Args:
        context: dict with keys ``name``, ``image_namespace``,
            ``image_branch``, ``resources`` (``limits``/``requests``),
            ``workingDir``; optional ``version``, ``args``, ``replicas``.

    Returns:
        client.V1Deployment ready to be submitted to the cluster.
    """
    name = context.get("name")
    version = context.get("version")
    # Default track is labelled v1; an explicit version means this is the
    # "-v2" flavour whose "app" label must point back at the base name.
    # BUGFIX: the original used name.strip("-v2"), which strips any of the
    # characters '-', 'v', '2' from BOTH ends (e.g. "vax-v2" -> "ax");
    # we remove only the literal "-v2" suffix.
    if not version:
        labels = {"app": name, "version": "v1"}
    else:
        base_name = name[:-len("-v2")] if name.endswith("-v2") else name
        labels = {"app": base_name, "version": version}
    # Image is <namespace><second-name-segment>:<branch>.
    image_tag = context["name"].split('-')[1]
    image = context["image_namespace"] + image_tag + ":" + context[
        "image_branch"]
    args = list(context["args"]) if context.get("args") else None
    limits, requests = context["resources"]["limits"], context["resources"][
        "requests"]
    replicas = context.get("replicas", 1)
    workingDir = context["workingDir"]
    # backend-logproxy must not get an istio sidecar; every other service
    # only excludes the redis port (6379) from sidecar interception.
    if name == "backend-logproxy":
        annotations = {"sidecar.istio.io/inject": "false"}
    else:
        annotations = {"traffic.sidecar.istio.io/excludeOutboundPorts": "6379"}
    # Container env comes from the per-service cmdb dump, e.g. /tmp/env_api.yml.
    filename = "env_" + name.split("-")[1] + ".yml"
    env = handle_env("/tmp/{}".format(filename))
    # ConfigMap volumes: obtain the directory layout from the svn branch's
    # "configmap" tree — one "mainfiles" root plus one volume per subdir.
    parentDir, subdir = handle_configmap("configmap")
    volumemounts = [
        client.V1VolumeMount(mount_path="/{}".format(parentDir),
                             name="mainfiles")
    ]
    volumes = [
        client.V1Volume(
            name="mainfiles",
            config_map=client.V1ConfigMapVolumeSource(name="mainfiles"))
    ]
    for sub in subdir:
        volumemounts.append(
            client.V1VolumeMount(mount_path="/{}/{}".format(parentDir, sub),
                                 name=sub))
        volumes.append(
            client.V1Volume(
                name=sub,
                config_map=client.V1ConfigMapVolumeSource(name=sub)))
    # Common container settings; frontend-dispatch additionally drains nginx
    # gracefully before shutdown via a preStop hook.
    container_kwargs = dict(
        name=name,
        image=image,
        env=env,
        args=args,
        volume_mounts=volumemounts,
        image_pull_policy="Always",
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(
                command=['cat', '/tmp/container_ready']),
            initial_delay_seconds=10,
            period_seconds=5),
        resources=client.V1ResourceRequirements(limits=limits,
                                                requests=requests),
        security_context=client.V1SecurityContext(privileged=True),
        working_dir=workingDir,
    )
    if name.startswith("frontend-dispatch"):
        container_kwargs["lifecycle"] = client.V1Lifecycle(
            pre_stop=client.V1Handler(
                _exec=client.V1ExecAction(
                    command=["nginx", "-s", "quit"])))
    containers = [client.V1Container(**container_kwargs)]
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels, annotations=annotations),
        spec=client.V1PodSpec(
            containers=containers,
            dns_policy="ClusterFirst",
            image_pull_secrets=[
                client.V1LocalObjectReference(name="image-pull-secret")
            ],
            restart_policy="Always",
            volumes=volumes))
    spec = client.V1DeploymentSpec(
        replicas=replicas,
        selector=client.V1LabelSelector(match_labels=labels),
        template=template,
        strategy=client.ExtensionsV1beta1DeploymentStrategy(
            rolling_update=client.ExtensionsV1beta1RollingUpdateDeployment(
                max_surge=1, max_unavailable='25%'),
            type="RollingUpdate",
        ),
    )
    return client.V1Deployment(api_version="apps/v1",
                               kind="Deployment",
                               metadata=client.V1ObjectMeta(name=name,
                                                            labels=labels),
                               spec=spec)
def from_runs(cls, id: str, runs: List[Run]):
    """Build the Deployment/Service/Ingress triple for a Tensorboard instance.

    Args:
        id: unique instance id; used to derive object names and labels.
            (Shadows the ``id`` builtin — kept for interface compatibility.)
        runs: experiment runs whose output directories the Tensorboard pod
            should mount (one sub-path mount per run).

    Returns:
        cls(...) wrapping the three constructed (not yet created) objects.
    """
    k8s_name = 'tensorboard-' + id
    run_names_hash = K8STensorboardInstance.generate_run_names_hash(runs)

    # One read-only mount per run: <prefix>/<owner>/<run-name>, backed by a
    # sub-path of the shared experiments-output PVC.
    volume_mounts = []
    for run in runs:
        mount = k8s.V1VolumeMount(
            name=cls.EXPERIMENTS_OUTPUT_VOLUME_NAME,
            mount_path=os.path.join(
                cls.TENSORBOARD_CONTAINER_MOUNT_PATH_PREFIX, run.owner,
                run.name),
            sub_path=os.path.join(run.owner, run.name))
        volume_mounts.append(mount)

    # 'runs-hash' lets callers detect whether an existing deployment already
    # serves exactly this set of runs.
    deployment_labels = {
        'name': k8s_name,
        'type': 'nauta-tensorboard',
        'nauta_app_name': 'tensorboard',
        'id': id,
        'runs-hash': run_names_hash
    }

    # Tensorboard binds to localhost only; external traffic goes through the
    # 'proxy' sidecar container on port 80.
    tensorboard_command = [
        "tensorboard", "--logdir",
        cls.TENSORBOARD_CONTAINER_MOUNT_PATH_PREFIX, "--port", "6006",
        "--host", "127.0.0.1"
    ]

    nauta_config = NautaPlatformConfig.incluster_init()
    tensorboard_image = nauta_config.get_tensorboard_image()
    tensorboard_proxy_image = nauta_config.get_activity_proxy_image()

    deployment = k8s.V1Deployment(
        api_version='apps/v1',
        kind='Deployment',
        metadata=k8s.V1ObjectMeta(name=k8s_name, labels=deployment_labels),
        spec=k8s.V1DeploymentSpec(
            replicas=1,
            selector=k8s.V1LabelSelector(match_labels=deployment_labels),
            template=k8s.V1PodTemplateSpec(
                metadata=k8s.V1ObjectMeta(labels=deployment_labels),
                spec=k8s.V1PodSpec(
                    # Tolerate and require master nodes: tolerates the
                    # master NoSchedule taint and node-affinity pins the pod
                    # to nodes labelled master=True.
                    tolerations=[
                        k8s.V1Toleration(key='master',
                                         operator='Exists',
                                         effect='NoSchedule')
                    ],
                    affinity=k8s.
                    V1Affinity(node_affinity=k8s.V1NodeAffinity(
                        required_during_scheduling_ignored_during_execution
                        =k8s.V1NodeSelector(node_selector_terms=[
                            k8s.V1NodeSelectorTerm(match_expressions=[
                                k8s.V1NodeSelectorRequirement(
                                    key="master",
                                    operator="In",
                                    values=["True"])
                            ])
                        ]))),
                    containers=[
                        k8s.V1Container(name='app',
                                        image=tensorboard_image,
                                        command=tensorboard_command,
                                        volume_mounts=volume_mounts),
                        # Activity proxy: fronts Tensorboard on port 80 and
                        # exposes the readiness endpoint /healthz.
                        k8s.V1Container(
                            name='proxy',
                            image=tensorboard_proxy_image,
                            ports=[k8s.V1ContainerPort(container_port=80)],
                            readiness_probe=k8s.V1Probe(
                                period_seconds=5,
                                http_get=k8s.V1HTTPGetAction(
                                    path='/healthz', port=80)))
                    ],
                    volumes=[
                        k8s.V1Volume(
                            name=cls.EXPERIMENTS_OUTPUT_VOLUME_NAME,
                            persistent_volume_claim=  # noqa
                            k8s.V1PersistentVolumeClaimVolumeSource(
                                claim_name=cls.
                                EXPERIMENTS_OUTPUT_VOLUME_NAME,
                                read_only=True))
                    ]))))

    # ClusterIP service exposing the proxy container.
    service = k8s.V1Service(
        api_version='v1',
        kind='Service',
        metadata=k8s.V1ObjectMeta(name=k8s_name,
                                  labels={
                                      'name': k8s_name,
                                      'type': 'nauta-tensorboard',
                                      'nauta_app_name': 'tensorboard',
                                      'id': id
                                  }),
        spec=k8s.V1ServiceSpec(
            type='ClusterIP',
            ports=[k8s.V1ServicePort(name='web', port=80, target_port=80)],
            selector={
                'name': k8s_name,
                'type': 'nauta-tensorboard',
                'nauta_app_name': 'tensorboard',
                'id': id
            }))

    # Ingress routing /tb/<id>/ to the service through the nauta ingress
    # class, with rewrite-target stripping the prefix.
    ingress = k8s.V1beta1Ingress(
        api_version='extensions/v1beta1',
        kind='Ingress',
        metadata=k8s.V1ObjectMeta(
            name=k8s_name,
            labels={
                'name': k8s_name,
                'type': 'nauta-tensorboard',
                'nauta_app_name': 'tensorboard',
                'id': id
            },
            annotations={
                'nauta.ingress.kubernetes.io/rewrite-target': '/',
                'kubernetes.io/ingress.class': 'nauta-ingress'
            }),
        spec=k8s.V1beta1IngressSpec(rules=[
            k8s.V1beta1IngressRule(
                host='localhost',
                http=k8s.V1beta1HTTPIngressRuleValue(paths=[
                    k8s.V1beta1HTTPIngressPath(
                        path='/tb/' + id + "/",
                        backend=k8s.V1beta1IngressBackend(
                            service_name=k8s_name, service_port=80))
                ]))
        ]))

    return cls(deployment=deployment, service=service, ingress=ingress)
def test_sanitize_k8s_container_attribute(self):
    """Verify sanitize_k8s_object coerces container attribute types.

    Three phases: (1) build a container whose every attribute has the wrong
    type and assert sanitization converts each to the correct type;
    (2) assert the converted *values* are correct; (3) assert invalid
    inputs raise ValueError.
    """
    # test cases for implicit type sanitization(conversion)
    op = dsl.ContainerOp(name='echo',
                         image='image',
                         command=['sh', '-c'],
                         arguments=['echo test | tee /tmp/message.txt'],
                         file_outputs={'merged': '/tmp/message.txt'})
    # Deliberately feed ints where strings belong, strings where
    # ints/bools belong, etc., across every settable container field.
    op.container \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/secret/gcp-credentials',
            name='gcp-credentials')) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=80)) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value_from=k8s_client.V1EnvVarSource(
                config_map_key_ref=k8s_client.V1ConfigMapKeySelector(key=80, name=8080, optional='False'),
                field_ref=k8s_client.V1ObjectFieldSelector(api_version=80, field_path=8080),
                resource_field_ref=k8s_client.V1ResourceFieldSelector(container_name=80, divisor=8080, resource=8888),
                secret_key_ref=k8s_client.V1SecretKeySelector(key=80, name=8080, optional='False')
            )
        )) \
        .add_env_from(k8s_client.V1EnvFromSource(
            config_map_ref=k8s_client.V1ConfigMapEnvSource(name=80, optional='True'),
            prefix=999
        )) \
        .add_env_from(k8s_client.V1EnvFromSource(
            secret_ref=k8s_client.V1SecretEnvSource(name=80, optional='True'),
            prefix=888
        )) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=111,
            mount_propagation=222,
            name=333,
            read_only='False',
            sub_path=444,
            sub_path_expr=555
        )) \
        .add_volume_devices(k8s_client.V1VolumeDevice(
            device_path=111,
            name=222
        )) \
        .add_port(k8s_client.V1ContainerPort(
            container_port='8080',
            host_ip=111,
            host_port='8888',
            name=222,
            protocol=333
        )) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='True',
            capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
            privileged='False',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22)
        )) \
        .set_stdin(stdin='False') \
        .set_stdin_once(stdin_once='False') \
        .set_termination_message_path(termination_message_path=111) \
        .set_tty(tty='False') \
        .set_readiness_probe(readiness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777'
        )) \
        .set_liveness_probe(liveness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777'
        )) \
        .set_lifecycle(lifecycle=k8s_client.V1Lifecycle(
            post_start=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
            ),
            pre_stop=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
            )
        ))

    sanitize_k8s_object(op.container)

    # env: names/values become str; nested selector fields become str/bool.
    for e in op.container.env:
        self.assertIsInstance(e.name, str)
        if e.value:
            self.assertIsInstance(e.value, str)
        if e.value_from:
            if e.value_from.config_map_key_ref:
                self.assertIsInstance(e.value_from.config_map_key_ref.key, str)
                if e.value_from.config_map_key_ref.name:
                    self.assertIsInstance(e.value_from.config_map_key_ref.name, str)
                if e.value_from.config_map_key_ref.optional:
                    self.assertIsInstance(e.value_from.config_map_key_ref.optional, bool)
            if e.value_from.field_ref:
                self.assertIsInstance(e.value_from.field_ref.field_path, str)
                if e.value_from.field_ref.api_version:
                    self.assertIsInstance(e.value_from.field_ref.api_version, str)
            if e.value_from.resource_field_ref:
                self.assertIsInstance(e.value_from.resource_field_ref.resource, str)
                if e.value_from.resource_field_ref.container_name:
                    self.assertIsInstance(e.value_from.resource_field_ref.container_name, str)
                if e.value_from.resource_field_ref.divisor:
                    self.assertIsInstance(e.value_from.resource_field_ref.divisor, str)
            if e.value_from.secret_key_ref:
                self.assertIsInstance(e.value_from.secret_key_ref.key, str)
                if e.value_from.secret_key_ref.name:
                    self.assertIsInstance(e.value_from.secret_key_ref.name, str)
                if e.value_from.secret_key_ref.optional:
                    self.assertIsInstance(e.value_from.secret_key_ref.optional, bool)

    # env_from sources: names/prefixes -> str, optional -> bool.
    for e in op.container.env_from:
        if e.prefix:
            self.assertIsInstance(e.prefix, str)
        if e.config_map_ref:
            if e.config_map_ref.name:
                self.assertIsInstance(e.config_map_ref.name, str)
            if e.config_map_ref.optional:
                self.assertIsInstance(e.config_map_ref.optional, bool)
        if e.secret_ref:
            if e.secret_ref.name:
                self.assertIsInstance(e.secret_ref.name, str)
            if e.secret_ref.optional:
                self.assertIsInstance(e.secret_ref.optional, bool)

    # volume mounts: paths/names -> str, read_only -> bool.
    for e in op.container.volume_mounts:
        if e.mount_path:
            self.assertIsInstance(e.mount_path, str)
        if e.mount_propagation:
            self.assertIsInstance(e.mount_propagation, str)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.read_only:
            self.assertIsInstance(e.read_only, bool)
        if e.sub_path:
            self.assertIsInstance(e.sub_path, str)
        if e.sub_path_expr:
            self.assertIsInstance(e.sub_path_expr, str)

    for e in op.container.volume_devices:
        if e.device_path:
            self.assertIsInstance(e.device_path, str)
        if e.name:
            self.assertIsInstance(e.name, str)

    # ports: numeric fields -> int, textual fields -> str.
    for e in op.container.ports:
        if e.container_port:
            self.assertIsInstance(e.container_port, int)
        if e.host_ip:
            self.assertIsInstance(e.host_ip, str)
        if e.host_port:
            self.assertIsInstance(e.host_port, int)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.protocol:
            self.assertIsInstance(e.protocol, str)

    # security context: booleans, ints and strings coerced per field type.
    if op.container.security_context:
        e = op.container.security_context
        if e.allow_privilege_escalation:
            self.assertIsInstance(e.allow_privilege_escalation, bool)
        if e.capabilities:
            for a in e.capabilities.add:
                self.assertIsInstance(a, str)
            for d in e.capabilities.drop:
                self.assertIsInstance(d, str)
        if e.privileged:
            self.assertIsInstance(e.privileged, bool)
        if e.proc_mount:
            self.assertIsInstance(e.proc_mount, str)
        if e.read_only_root_filesystem:
            self.assertIsInstance(e.read_only_root_filesystem, bool)
        if e.run_as_group:
            self.assertIsInstance(e.run_as_group, int)
        if e.run_as_non_root:
            self.assertIsInstance(e.run_as_non_root, bool)
        if e.run_as_user:
            self.assertIsInstance(e.run_as_user, int)
        if e.se_linux_options:
            if e.se_linux_options.level:
                self.assertIsInstance(e.se_linux_options.level, str)
            if e.se_linux_options.role:
                self.assertIsInstance(e.se_linux_options.role, str)
            if e.se_linux_options.type:
                self.assertIsInstance(e.se_linux_options.type, str)
            if e.se_linux_options.user:
                self.assertIsInstance(e.se_linux_options.user, str)
        if e.windows_options:
            if e.windows_options.gmsa_credential_spec:
                self.assertIsInstance(e.windows_options.gmsa_credential_spec, str)
            if e.windows_options.gmsa_credential_spec_name:
                self.assertIsInstance(e.windows_options.gmsa_credential_spec_name, str)

    if op.container.stdin:
        self.assertIsInstance(op.container.stdin, bool)
    if op.container.stdin_once:
        self.assertIsInstance(op.container.stdin_once, bool)
    if op.container.termination_message_path:
        self.assertIsInstance(op.container.termination_message_path, str)
    if op.container.tty:
        self.assertIsInstance(op.container.tty, bool)

    # Probes share the same shape, so check readiness and liveness together.
    for e in [op.container.readiness_probe, op.container.liveness_probe]:
        if e:
            if e._exec:
                for c in e._exec.command:
                    self.assertIsInstance(c, str)
            if e.failure_threshold:
                self.assertIsInstance(e.failure_threshold, int)
            if e.http_get:
                if e.http_get.host:
                    self.assertIsInstance(e.http_get.host, str)
                if e.http_get.http_headers:
                    for h in e.http_get.http_headers:
                        if h.name:
                            self.assertIsInstance(h.name, str)
                        if h.value:
                            self.assertIsInstance(h.value, str)
                if e.http_get.path:
                    self.assertIsInstance(e.http_get.path, str)
                if e.http_get.port:
                    self.assertIsInstance(e.http_get.port, (str, int))
                if e.http_get.scheme:
                    self.assertIsInstance(e.http_get.scheme, str)
            if e.initial_delay_seconds:
                self.assertIsInstance(e.initial_delay_seconds, int)
            if e.period_seconds:
                self.assertIsInstance(e.period_seconds, int)
            if e.success_threshold:
                self.assertIsInstance(e.success_threshold, int)
            if e.tcp_socket:
                if e.tcp_socket.host:
                    self.assertIsInstance(e.tcp_socket.host, str)
                if e.tcp_socket.port:
                    self.assertIsInstance(e.tcp_socket.port, (str, int))
            if e.timeout_seconds:
                self.assertIsInstance(e.timeout_seconds, int)

    # Lifecycle handlers mirror the probe structure (minus thresholds).
    if op.container.lifecycle:
        for e in [op.container.lifecycle.post_start, op.container.lifecycle.pre_stop]:
            if e:
                if e._exec:
                    for c in e._exec.command:
                        self.assertIsInstance(c, str)
                if e.http_get:
                    if e.http_get.host:
                        self.assertIsInstance(e.http_get.host, str)
                    if e.http_get.http_headers:
                        for h in e.http_get.http_headers:
                            if h.name:
                                self.assertIsInstance(h.name, str)
                            if h.value:
                                self.assertIsInstance(h.value, str)
                    if e.http_get.path:
                        self.assertIsInstance(e.http_get.path, str)
                    if e.http_get.port:
                        self.assertIsInstance(e.http_get.port, (str, int))
                    if e.http_get.scheme:
                        self.assertIsInstance(e.http_get.scheme, str)
                if e.tcp_socket:
                    if e.tcp_socket.host:
                        self.assertIsInstance(e.tcp_socket.host, str)
                    if e.tcp_socket.port:
                        self.assertIsInstance(e.tcp_socket.port, (str, int))

    # test cases for checking value after sanitization
    check_value_op = dsl.ContainerOp(name='echo',
                                     image='image',
                                     command=['sh', '-c'],
                                     arguments=['echo test | tee /tmp/message.txt'],
                                     file_outputs={'merged': '/tmp/message.txt'})
    # NOTE(review): run_as_user='******' looks like a redacted literal (the
    # assertion below expects 333) — confirm against the upstream source.
    check_value_op.container \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=8080)) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='true',
            capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
            privileged='false',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22)
        ))

    sanitize_k8s_object(check_value_op.container)

    self.assertEqual(check_value_op.container.env[0].name, '80')
    self.assertEqual(check_value_op.container.env[0].value, '8080')
    self.assertEqual(check_value_op.container.security_context.allow_privilege_escalation, True)
    self.assertEqual(check_value_op.container.security_context.capabilities.add[0], '11')
    self.assertEqual(check_value_op.container.security_context.capabilities.add[1], '22')
    self.assertEqual(check_value_op.container.security_context.capabilities.drop[0], '33')
    self.assertEqual(check_value_op.container.security_context.capabilities.drop[1], '44')
    self.assertEqual(check_value_op.container.security_context.privileged, False)
    self.assertEqual(check_value_op.container.security_context.proc_mount, '111')
    self.assertEqual(check_value_op.container.security_context.read_only_root_filesystem, False)
    self.assertEqual(check_value_op.container.security_context.run_as_group, 222)
    self.assertEqual(check_value_op.container.security_context.run_as_non_root, True)
    self.assertEqual(check_value_op.container.security_context.run_as_user, 333)
    self.assertEqual(check_value_op.container.security_context.se_linux_options.level, '11')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.role, '22')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.type, '33')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.user, '44')
    self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec, '11')
    self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec_name, '22')

    # test cases for exception
    with self.assertRaises(ValueError, msg='Invalid boolean string 2. Should be boolean.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation=1
            ))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(ValueError, msg='Invalid boolean string Test. Should be "true" or "false".'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation='Test'
            ))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(ValueError, msg='Invalid test. Should be integer.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                run_as_group='test',
            ))
        sanitize_k8s_object(exception_op.container)
def specifications(self):
    """Build the deployment specification from ``self.definition``.

    Reads ports, healthchecks, repo/tag/owner/branch/name/uid keys from
    the definition dict and assembles a single-container deployment.

    :returns: The deployment specifications (not yet created on a cluster).
    :rtype: client.ExtensionsV1beta1Deployment

    NOTE(review): uses the removed extensions/v1beta1 API — confirm the
    target cluster still serves it before reuse.
    """
    #1. Set the ports
    ports = [client.V1ContainerPort(container_port=port['number'],
                                    protocol=port['protocol'])
             for port in self.definition['ports']]

    #2. Set the resources
    # NOTE(review): cpu/memory are hard-coded to 100m/100Mi; the
    # commented line shows the intended per-definition values.
    #resources = {'cpu':self.definition['cpu'], 'memory': self.definition['memory']}
    resources = {'cpu':'100m', 'memory': '100Mi'}
    # Same dict is used for both limits and requests (guaranteed QoS).
    resources = client.V1ResourceRequirements(limits=resources,
                                              requests=resources)

    #3. Set healthchecks
    # Each healthcheck dict selects exactly one action by its 'command':
    # 'COMMAND' -> exec, 'TCP' -> tcp socket, anything else -> HTTP GET
    # (where 'command' doubles as the scheme, e.g. HTTP/HTTPS).
    readiness = None
    liveness = None
    for health in self.definition['healthchecks']:
        _exec, tcp_socket, http_get = None, None, None
        if health['command'] == 'COMMAND':
            _exec = client.V1ExecAction(command=health['value'])
        elif health['command'] == 'TCP':
            tcp_socket = client.V1TCPSocketAction(port=health['port'])
        else:
            http_get = client.V1HTTPGetAction(path=health['path'],
                                              port=health['port'],
                                              scheme=health['command'])
        # Thresholds/timings are shared across all probes of the service.
        probe = client.V1Probe(failure_threshold=self.definition['failure_threshold'],
                               initial_delay_seconds=self.definition['initial_delay_seconds'],
                               period_seconds=self.definition['interval_seconds'],
                               success_threshold=self.definition['success_threshold'],
                               timeout_seconds=self.definition['timeout_seconds'],
                               _exec=_exec,
                               tcp_socket=tcp_socket,
                               http_get=http_get
                               )
        if health['type'] == 'readiness':
            readiness = probe
        else:
            liveness = probe

    #4. Security context: no privilege escalation, non-root only.
    security = client.V1SecurityContext(allow_privilege_escalation=False,
                                        run_as_non_root=True)

    #5. Set the container
    container = client.V1Container(
        name=self.definition['repo'],
        image=self.definition['tag'],
        ports=ports,
        resources=resources,
        readiness_probe=readiness,
        liveness_probe=liveness,
        security_context=security,
        env=None)#No need to setup envs : already done in the dockerfile

    #6. The pod template
    template = client.V1PodTemplateSpec(
        client.V1ObjectMeta(labels={"name": self.definition['repo'],
                                    "repo": self.definition['repo'],
                                    "owner": self.definition['owner'],
                                    "branch": self.definition['branch'],
                                    "fullname": self.definition['name']}),
        spec=client.V1PodSpec(containers=[container]))

    #7. The rolling update strategy
    strategy = client.ExtensionsV1beta1DeploymentStrategy(
        type='RollingUpdate',
        rolling_update=client.ExtensionsV1beta1RollingUpdateDeployment(
            max_surge=1,
            max_unavailable=1
        )
    )

    #8. The deployment spec
    # NOTE(review): replicas pinned to 1; the commented line shows the
    # intended per-definition instance count.
    spec = client.ExtensionsV1beta1DeploymentSpec(
        #replicas=self.definition['instances'],
        replicas=1,
        strategy=strategy,
        template=template)

    #9. The deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.definition['repo'],
                                     labels={"uid": self.definition['uid'],
                                             "repo": self.definition['repo'],
                                             "owner": self.definition['owner'],
                                             "branch": self.definition['branch'],
                                             "fullname": self.definition['name'],
                                             "name": self.definition['repo']}),
        spec=spec)

    return deployment
def createStatefulSet(username, replicas, image, is_host_network=False, ssh_port="22"):
    """Build (but do not create) a horovod worker StatefulSet for *username*.

    Args:
        username: owner; object names are derived as "<username>-horovod".
        replicas: number of worker pods.
        image: container image for both the worker and the init container.
        is_host_network: when True, run pods on the host network.
        ssh_port: value exported to the container via the SSHPORT env var.

    Returns:
        client.V1StatefulSet: the assembled object, ready for the API.
    """
    statefulset_name = username + "-horovod"
    # All worker pods share one label set, used for metadata and selection.
    worker_labels = {"app": "horovod", "user": username, "role": "worker"}

    statefulset = client.V1StatefulSet()
    #statefulset.api_version="apps/v1beta2"
    statefulset.metadata = client.V1ObjectMeta(name=statefulset_name,
                                               labels=worker_labels)
    label_selector = client.V1LabelSelector(match_labels=worker_labels)

    # Define the Pod template.
    pod_template = client.V1PodTemplateSpec()
    pod_template.metadata = client.V1ObjectMeta(labels=worker_labels)

    container = client.V1Container(name="worker")
    container.image = image
    container.image_pull_policy = "IfNotPresent"
    container.env = [
        client.V1EnvVar(name="SSHPORT", value=ssh_port),
        client.V1EnvVar(name="USESECRETS", value="true"),  # TODO: make configurable
        client.V1EnvVar(name="ENTRY_POINT", value="train.py")
    ]
    # NOTE(review): the declared container port is fixed at 22 even though
    # ssh_port is configurable via SSHPORT — confirm whether this should
    # be int(ssh_port).
    container.ports = [client.V1ContainerPort(container_port=22)]
    container.volume_mounts = [
        # Generated scripts/hostfile from the ConfigMap.
        client.V1VolumeMount(name=statefulset_name + "-cm",
                             mount_path="/horovod/generated"),
        # SSH host keys from the Secret (read-only).
        client.V1VolumeMount(name=statefulset_name + "-secret",
                             mount_path="/etc/secret-volume",
                             read_only=True),
        # Shared scratch space populated by the init container.
        client.V1VolumeMount(name=statefulset_name + "-data",
                             mount_path="/horovod/data")
    ]
    container.command = ["/horovod/generated/run.sh"]
    # Readiness gates on the generated SSH check script.
    container.readiness_probe = client.V1Probe(
        _exec=client.V1ExecAction(command=["/horovod/generated/check.sh"]),
        initial_delay_seconds=1,
        period_seconds=2)

    pod_spec = client.V1PodSpec(containers=[container])
    # When host networking is requested, the DNS policy must follow suit.
    if is_host_network:
        pod_spec.host_network = True
        pod_spec.dns_policy = "ClusterFirstWithHostNet"

    pod_spec.volumes = [
        client.V1Volume(
            name=statefulset_name + "-cm",
            config_map=client.V1ConfigMapVolumeSource(
                name=statefulset_name,
                items=[
                    client.V1KeyToPath(key="hostfile.config",
                                       path="hostfile",
                                       mode=438),  # 0o666
                    client.V1KeyToPath(key="ssh.readiness",
                                       path="check.sh",
                                       mode=365),  # 0o555 (executable)
                    client.V1KeyToPath(key="worker.run",
                                       path="run.sh",
                                       mode=365)
                ])),
        client.V1Volume(
            name=statefulset_name + "-secret",
            secret=client.V1SecretVolumeSource(
                secret_name=statefulset_name,
                default_mode=448,  # 0o700
                items=[
                    client.V1KeyToPath(key="host-key", path="id_rsa"),
                    client.V1KeyToPath(key="host-key-pub",
                                       path="authorized_keys")
                ])),
        client.V1Volume(name=statefulset_name + "-data",
                        empty_dir=client.V1EmptyDirVolumeSource())
    ]
    pod_spec.subdomain = statefulset_name
    pod_spec.hostname = statefulset_name
    # Init container downloads the user's training script into the shared
    # emptyDir volume before the worker starts.
    pod_spec.init_containers = [
        client.V1Container(
            name="download-data",
            image=image,
            image_pull_policy="IfNotPresent",
            command=["/bin/bash", "-c"],
            args=[
                "curl http://ywj-horovod.s3.ap-northeast-2.amazonaws.com/horovod/"
                + username + "/train.py > /horovod/data/train.py"
            ],
            volume_mounts=[
                client.V1VolumeMount(name=statefulset_name + "-data",
                                     mount_path="/horovod/data")
            ])
    ]
    pod_template.spec = pod_spec

    statefulset.spec = client.V1StatefulSetSpec(
        selector=label_selector,
        service_name=statefulset_name + "-worker",
        # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity
        pod_management_policy="Parallel",
        replicas=replicas,
        template=pod_template)

    return statefulset
def create_deploy():
    """Flask view: create a Deployment from the POSTed JSON form.

    Expects a JSON body with project/environment/image fields, optional
    labels, resource sizes and health-check settings. Validates the input,
    builds the deployment object, dumps it to demo-deployment.yaml for
    inspection, and submits it to the cluster.

    Returns:
        JSON: the deployment object on success, or {"error": 1002, "msg": ...}
        on validation failure; {'a': 1} for non-POST requests.
    """
    error = ""
    if request.method == "POST":
        data = json.loads(request.get_data().decode("utf-8"))
        project = data.get("project").strip()
        environment = data.get("environment").strip()
        cluster = data.get("cluster").strip()
        imageRepo = data.get("imageRepo").strip()
        imageName = data.get("imageName").strip()
        imageTag = data.get("imageTag").strip()
        imagePullPolicy = data.get("imagePullPolicy").strip()
        imagePullSecret = data.get("imagePullSecret").strip()
        containerPort = str_to_int(data.get("containerPort").strip())
        replicas = data.get("replicas").strip()
        cpu = data.get("cpu").strip()
        memory = data.get("memory").strip()
        label_key1 = data.get("label_key1").strip()
        label_value1 = data.get("label_value1").strip()
        label_key2 = data.get("label_key2").strip()
        label_value2 = data.get("label_value2").strip()
        env = data.get("env").strip()
        volumeMount = data.get("volumeMount").strip()
        updateType = data.get("updateType").strip()
        probeType = data.get("probeType").strip()
        healthCheck = data.get("healthCheck").strip()
        healthPath = data.get("healthPath").strip()
        initialDelaySeconds = str_to_int(
            data.get("initialDelaySeconds").strip())
        periodSeconds = str_to_int(data.get("periodSeconds").strip())
        failureThreshold = str_to_int(data.get("failureThreshold").strip())
        healthTimeout = str_to_int(data.get("healthTimeout").strip())
        healthCmd = data.get("healthCmd").strip()

        # Health checks: the same probe is used for both liveness and
        # readiness; only tcp and http probe types are implemented.
        liveness_probe = None
        readiness_probe = None
        if healthCheck == "true":
            if probeType == "tcp":
                liveness_probe = client.V1Probe(
                    initial_delay_seconds=initialDelaySeconds,
                    period_seconds=periodSeconds,
                    timeout_seconds=healthTimeout,
                    failure_threshold=failureThreshold,
                    tcp_socket=client.V1TCPSocketAction(port=containerPort))
                readiness_probe = liveness_probe
            elif probeType == "http":
                liveness_probe = client.V1Probe(
                    initial_delay_seconds=initialDelaySeconds,
                    period_seconds=periodSeconds,
                    timeout_seconds=healthTimeout,
                    failure_threshold=failureThreshold,
                    http_get=client.V1HTTPGetAction(path=healthPath,
                                                    port=containerPort))
                readiness_probe = liveness_probe
            elif probeType == "cmd":
                # Command probes are not implemented yet.
                pass
            else:
                pass

        # Validation; later checks overwrite earlier messages, matching the
        # original behavior (the last failing check wins).
        if containerPort == 1:  # str_to_int sentinel for an empty port
            error = "容器端口不能为空"
        if (imageRepo == "" or project == "" or environment == ""
                or imageName == "" or imageTag == ""):
            error = "镜像相关不能为空"
        if label_key1 == "" or label_value1 == "":
            error = "label相关数据不能为空(至少输入一对key/value)"

        replicas = str_to_int(replicas)
        cpu = int(1000 * (str_to_float(cpu)))       # cores -> millicores
        memory = int(1024 * (str_to_float(memory))) # GiB -> MiB
        if error != "":
            return jsonify({"error": 1002, "msg": error})

        #ms-dev
        namespace = project + "-" + environment
        # myhub.mydocker.com/ms-dev/base:v1.0
        image = imageRepo + "/" + project + "-" + environment + "/" + imageName + ":" + imageTag
        labels = {label_key1: label_value1}
        if label_key2 != "" and label_value2 != "":
            labels[label_key2] = label_value2

        myclient = client.AppsV1Api()
        deployment = create_deployment_object(name=imageName, namespace=namespace, image=image, port=containerPort,
                                              image_pull_policy=imagePullPolicy, imagePullSecret=imagePullSecret,
                                              labels=labels, replicas=replicas, cpu=cpu, memory=memory,
                                              liveness_probe=liveness_probe, readiness_probe=readiness_probe)
        # Round-trip through JSON to plain dicts, then dump a YAML copy for
        # inspection. safe_load replaces the deprecated/unsafe yaml.load,
        # and the file handle is closed deterministically.
        to_yaml = yaml.safe_load(json.dumps(deployment, indent=4, cls=MyEncoder))
        file = os.path.join(dir_path, "demo-deployment.yaml")
        with open(file, 'w') as stream:
            yaml.safe_dump(to_yaml, stream, default_flow_style=False)
        create_deployment(api_instance=myclient,
                          namespace=namespace,
                          deployment=deployment)
        return json.dumps(deployment, indent=4, cls=MyEncoder)
    return jsonify({'a': 1})
def generate_stateful_set(self):
    """Build the StatefulSet for a pull-through Docker registry mirror.

    Uses instance state: ``self.volume_claim_spec`` (kwargs dict),
    ``self.metadata``, ``self.full_name``, ``self.labels``,
    ``self.ca_certificate_bundle`` (optional ConfigMap name),
    ``self.docker_certificate_secret`` and ``self.upstreamUrl``.

    Returns:
        client.V1beta1StatefulSet: the assembled (not yet created) object.
    """
    # Volume claim template for the image store, with sane defaults when
    # the caller's spec omits access modes or storage size.
    volume_claim_spec = client.V1PersistentVolumeClaimSpec(**self.volume_claim_spec)
    if not volume_claim_spec.access_modes:
        volume_claim_spec.access_modes = ["ReadWriteOnce"]
    if not volume_claim_spec.resources:
        volume_claim_spec.resources = client.V1ResourceRequirements(
            requests={"storage": "20Gi"}
        )
    stateful_set = client.V1beta1StatefulSet(
        metadata=self.metadata,
        spec=client.V1beta1StatefulSetSpec(
            # we can't update service name or pod management policy
            service_name=self.full_name + "-headless",
            pod_management_policy="Parallel",
            # we can't update volume claim templates
            volume_claim_templates=[client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(
                    name="image-store",
                ),
                spec=volume_claim_spec,
            )]
        )
    )
    # NOTE(review): replica count is fixed at 2 — confirm whether this
    # should be configurable.
    stateful_set.spec.replicas = 2
    pod_labels = {'component': 'registry'}
    pod_labels.update(self.labels)

    # Volumes: optional CA bundle ConfigMap plus the TLS secret.
    volumes = []
    if self.ca_certificate_bundle:
        volumes = [
            client.V1Volume(
                name=self.ca_certificate_bundle,
                config_map=client.V1ConfigMapVolumeSource(
                    name=self.ca_certificate_bundle
                )
            )
        ]
    volumes.append(
        client.V1Volume(
            name="tls",
            secret=client.V1SecretVolumeSource(
                secret_name=self.docker_certificate_secret
            ),
        )
    )
    volumes_to_mount = [
        client.V1VolumeMount(
            name="image-store",
            mount_path="/var/lib/registry"
        ),
        client.V1VolumeMount(
            name="tls",
            mount_path="/etc/registry-certs",
            read_only=True
        )
    ]
    if self.ca_certificate_bundle:
        # Mounting over /etc/ssl/certs replaces the system CA store.
        volumes_to_mount.append(
            client.V1VolumeMount(
                name=self.ca_certificate_bundle,
                mount_path="/etc/ssl/certs",
                read_only=True
            )
        )
    # Registry configuration via REGISTRY_* environment variables:
    # proxy (pull-through) mode, TLS on :5000, debug endpoint on localhost.
    env = [client.V1EnvVar(name="REGISTRY_PROXY_REMOTEURL",
                           value="https://" + self.upstreamUrl),
           client.V1EnvVar(name="REGISTRY_HTTP_ADDR",
                           value=":5000"),
           client.V1EnvVar(name="REGISTRY_HTTP_DEBUG_ADDR",
                           value="localhost:6000"),
           client.V1EnvVar(name="REGISTRY_HTTP_TLS_CERTIFICATE",
                           value="/etc/registry-certs/tls.crt"),
           client.V1EnvVar(name="REGISTRY_HTTP_TLS_KEY",
                           value="/etc/registry-certs/tls.key"),
           client.V1EnvVar(name="REGISTRY_LOG_ACCESSLOG_DISABLED",
                           value="true"),
           client.V1EnvVar(name="REGISTRY_LOG_FORMATTER",
                           value="logstash"),
           client.V1EnvVar(name="REGISTRY_STORAGE_DELETE_ENABLED",
                           value="true"),
           client.V1EnvVar(name="REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY",
                           value="/var/lib/registry")
           ]
    env = self.handle_proxy_credentials(env)
    stateful_set.spec.template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(
            labels=pod_labels
        ),
        spec=client.V1PodSpec(
            # Init container discards a corrupt/truncated scheduler state
            # file: removes it if it is smaller than LOWER_LIMIT bytes or
            # fails to parse as JSON.
            init_containers=[
                client.V1Container(
                    name="validate-state-file",
                    image="python:3.6-alpine",
                    env=[
                        client.V1EnvVar(
                            name="STATE_FILE",
                            value="/var/lib/registry/scheduler-state.json"
                        ),
                        client.V1EnvVar(
                            name="LOWER_LIMIT",
                            value="1024"
                        ),
                    ],
                    volume_mounts=[
                        client.V1VolumeMount(
                            name="image-store",
                            mount_path="/var/lib/registry"
                        )
                    ],
                    command=[
                        "sh",
                        "-e",
                        "-c",
                        "touch $STATE_FILE; if [[ $(stat -c \"%s\" $STATE_FILE) -lt $LOWER_LIMIT ]]; then rm -f $STATE_FILE; else cat $STATE_FILE | python -m json.tool > /dev/null 2>&1 || rm -f $STATE_FILE; fi"  # noqa
                    ]
                )
            ],
            containers=[
                client.V1Container(
                    name="registry",
                    image="registry:2.6.0",
                    env=env,
                    readiness_probe=client.V1Probe(
                        http_get=client.V1HTTPGetAction(
                            path="/",
                            port=5000,
                            scheme="HTTPS"
                        ),
                        initial_delay_seconds=3,
                        period_seconds=3
                    ),
                    ports=[client.V1ContainerPort(
                        container_port=5000,
                        name="https"
                    )],
                    resources=client.V1ResourceRequirements(
                        requests={"cpu": "0.1", "memory": "500Mi"},
                        limits={"cpu": "0.5", "memory": "500Mi"}
                    ),
                    volume_mounts=volumes_to_mount,
                )
            ],
            termination_grace_period_seconds=10,
            volumes=volumes,
        )
    )
    stateful_set.spec.update_strategy = client.V1beta1StatefulSetUpdateStrategy(type="RollingUpdate",)
    return stateful_set
def create_deployment_old(config_file):
    """
    Create IBM Spectrum Scale CSI Operator deployment object in operator
    namespace using deployment_operator_image_for_crd and
    deployment_driver_image_for_crd parameters from config.json file

    Args:
        param1: config_file - configuration json file

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    deployment_apps_api_instance = client.AppsV1Api()

    # Standard operator labels; the pod selector below matches on the
    # "app.kubernetes.io/name" entry of this set.
    deployment_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }
    deployment_annotations = {
        "productID": "ibm-spectrum-scale-csi-operator",
        "productName": "IBM Spectrum Scale CSI Operator",
        "productVersion": "2.0.0"
    }
    # namespace_value is a module-level global set elsewhere in this file.
    deployment_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator",
        labels=deployment_labels,
        namespace=namespace_value)
    deployment_selector = client.V1LabelSelector(
        match_labels={
            "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator"
        })
    podtemplate_metadata = client.V1ObjectMeta(
        labels=deployment_labels,
        annotations=deployment_annotations)

    # Hard requirement: only schedule on nodes that advertise an
    # architecture label (any value).
    pod_affinity = client.V1Affinity(node_affinity=client.V1NodeAffinity(
        required_during_scheduling_ignored_during_execution=client.
        V1NodeSelector(node_selector_terms=[
            client.V1NodeSelectorTerm(match_expressions=[
                client.V1NodeSelectorRequirement(key="beta.kubernetes.io/arch",
                                                 operator="Exists")
            ])
        ])))

    # Log-forwarding sidecar: tails the ansible runner's stdout from the
    # shared "runner" volume (mounted read-only here).
    ansible_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        command=[
            "/usr/local/bin/ao-logs", "/tmp/ansible-operator/runner", "stdout"
        ],
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=10,
            period_seconds=30),
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=3,
            period_seconds=1),
        name="ansible",
        image_pull_policy="IfNotPresent",
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        volume_mounts=[
            client.V1VolumeMount(mount_path="/tmp/ansible-operator/runner",
                                 name="runner",
                                 read_only=True)
        ],
        env=[
            client.V1EnvVar(
                name="CSI_DRIVER_IMAGE",
                value=config_file["deployment_driver_image_for_crd"])
        ])

    # Main operator container; WATCH_NAMESPACE and POD_NAME are injected
    # from the pod's own metadata via the downward API.
    operator_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        name="operator",
        image_pull_policy="IfNotPresent",
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=10,
            period_seconds=30),
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=3,
            period_seconds=1),
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        env=[
            client.V1EnvVar(name="WATCH_NAMESPACE",
                            value_from=client.V1EnvVarSource(
                                field_ref=client.V1ObjectFieldSelector(
                                    field_path="metadata.namespace"))),
            client.V1EnvVar(name="POD_NAME",
                            value_from=client.V1EnvVarSource(
                                field_ref=client.V1ObjectFieldSelector(
                                    field_path="metadata.name"))),
            client.V1EnvVar(name="OPERATOR_NAME",
                            value="ibm-spectrum-scale-csi-operator"),
            client.V1EnvVar(
                name="CSI_DRIVER_IMAGE",
                value=config_file["deployment_driver_image_for_crd"])
        ],
        volume_mounts=[
            client.V1VolumeMount(mount_path="/tmp/ansible-operator/runner",
                                 name="runner")
        ])

    # Both containers share an in-memory emptyDir at /tmp/ansible-operator/runner.
    pod_spec = client.V1PodSpec(
        affinity=pod_affinity,
        containers=[ansible_pod_container, operator_pod_container],
        service_account_name="ibm-spectrum-scale-csi-operator",
        volumes=[
            client.V1Volume(
                empty_dir=client.V1EmptyDirVolumeSource(medium="Memory"),
                name="runner")
        ])
    podtemplate_spec = client.V1PodTemplateSpec(metadata=podtemplate_metadata,
                                                spec=pod_spec)
    deployment_spec = client.V1DeploymentSpec(replicas=1,
                                              selector=deployment_selector,
                                              template=podtemplate_spec)
    body_dep = client.V1Deployment(kind='Deployment',
                                   api_version='apps/v1',
                                   metadata=deployment_metadata,
                                   spec=deployment_spec)
    try:
        LOGGER.info("creating deployment for operator")
        deployment_apps_api_response = deployment_apps_api_instance.create_namespaced_deployment(
            namespace=namespace_value, body=body_dep)
        LOGGER.debug(str(deployment_apps_api_response))
    except ApiException as e:
        # NOTE(review): the message names RbacAuthorizationV1Api, but the call
        # above is on AppsV1Api — looks like a copy/paste; confirm before
        # relying on this log text.
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->create_namespaced_deployment: {e}"
        )
        # Fail the calling test on any API error (raises AssertionError).
        assert False
def template(context):
    """Render a PostgreSQL StatefulSet manifest from a context dict.

    Args:
        context: dict with keys ``name``, ``image``, ``replicas``; optional
            ``pvc`` (list of {"name", "class", "size", "mountPath"} dicts),
            ``configmap`` (mounts the ``name`` config map at /etc/postgresql/),
            ``resources`` (kwargs for V1ResourceRequirements) and
            ``nodeSelector``.

    Returns:
        A ``client.V1StatefulSet`` object (apps/v1beta1), not yet submitted.
    """
    pod_spec_volumes = []
    pod_spec_volume_mounts = []
    stateful_set_spec_volume_claim_templates = []

    # One volume claim template + mount per requested PVC.  "or []" keeps a
    # context without any "pvc" entry from raising TypeError on iteration.
    for pvc in context.get("pvc") or []:
        stateful_set_spec_volume_claim_templates.append(
            client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(
                    name=pvc["name"],
                    annotations={
                        "volume.beta.kubernetes.io/storage-class": pvc["class"]
                    },
                ),
                spec=client.V1PersistentVolumeClaimSpec(
                    access_modes=["ReadWriteOnce"],
                    resources=client.V1ResourceRequirements(
                        requests={"storage": pvc["size"]}
                    ),
                ),
            )
        )
        pod_spec_volume_mounts.append(
            client.V1VolumeMount(name=pvc["name"], mount_path=pvc["mountPath"])
        )

    if "configmap" in context:
        volume_name = "{}-config".format(context["name"])
        pod_spec_volumes.append(
            client.V1Volume(
                name=volume_name,
                config_map=client.V1ConfigMapVolumeSource(name=context["name"]),
            )
        )
        pod_spec_volume_mounts.append(
            client.V1VolumeMount(name=volume_name, mount_path="/etc/postgresql/")
        )

    labels = {"app": context["name"]}

    # Exec handlers take an argv array, not a shell command line: a
    # single-element ["gosu postgres pg_isready"] would search for a binary
    # literally named "gosu postgres pg_isready" and always fail.  Run the
    # command through `sh -c` so it is parsed by a shell (which also lets
    # "$PGDATA" expand in the preStop hook below).
    pg_isready_exec = client.V1ExecAction(
        command=["sh", "-c", "gosu postgres pg_isready"]
    )

    return client.V1StatefulSet(
        api_version="apps/v1beta1",
        kind="StatefulSet",
        metadata=client.V1ObjectMeta(name=context["name"]),
        spec=client.V1StatefulSetSpec(
            service_name=context["name"],
            replicas=context["replicas"],
            selector=client.V1LabelSelector(match_labels=labels),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels=labels),
                spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(
                            name="postgres",
                            image=context["image"],
                            lifecycle=client.V1Lifecycle(
                                # Fast, clean shutdown before the pod is killed.
                                pre_stop=client.V1Handler(
                                    _exec=client.V1ExecAction(
                                        command=[
                                            "sh",
                                            "-c",
                                            'gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop',
                                        ]
                                    )
                                )
                            ),
                            liveness_probe=client.V1Probe(
                                _exec=pg_isready_exec,
                                initial_delay_seconds=120,
                                timeout_seconds=5,
                                failure_threshold=6,
                            ),
                            # Effectively-unbounded failure threshold: a slow
                            # replica is taken out of rotation, never restarted
                            # by readiness alone.
                            readiness_probe=client.V1Probe(
                                _exec=pg_isready_exec,
                                initial_delay_seconds=10,
                                timeout_seconds=5,
                                period_seconds=30,
                                failure_threshold=999,
                            ),
                            ports=[client.V1ContainerPort(container_port=5432)],
                            volume_mounts=pod_spec_volume_mounts,
                            resources=client.V1ResourceRequirements(
                                **context["resources"]
                            ) if "resources" in context else None,
                        )
                    ],
                    volumes=pod_spec_volumes,
                    node_selector=context.get("nodeSelector"),
                ),
            ),
            volume_claim_templates=stateful_set_spec_volume_claim_templates,
        ),
    )
def export_deployment(self):
    """Build an ExtensionsV1beta1Deployment object for this service.

    Reads instance attributes: ``dm_name``, ``image``, ``container_port``
    (list of port numbers/strings), ``replicas``, ``re_limits``/
    ``re_requests``, optional ``mounts`` ({container_path: volume_name}),
    optional ``healthcheck`` (HTTP path) and optional ``sidecar`` image.

    Returns:
        client.ExtensionsV1beta1Deployment — built but not submitted.
    """
    # Logs are always written to a host path.
    volume_mounts = [
        client.V1VolumeMount(mount_path='/docker/logs', name='logs')
    ]
    volumes = [
        client.V1Volume(name='logs',
                        host_path=client.V1HostPathVolumeSource(
                            path='/opt/logs', type='DirectoryOrCreate'))
    ]
    # Extra host-path mounts requested by the caller.
    if self.mounts:
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(name=self.mounts[path],
                                host_path=client.V1HostPathVolumeSource(
                                    path=path, type='DirectoryOrCreate')))

    # Probe the first exposed port: HTTP GET when a healthcheck path is
    # configured, plain TCP connect otherwise.  (The original built TCP
    # probes unconditionally and overwrote them — build each probe once.)
    probe_port = int(self.container_port[0])
    if self.healthcheck:
        def _make_probe():
            return client.V1Probe(initial_delay_seconds=15,
                                  http_get=client.V1HTTPGetAction(
                                      path=self.healthcheck,
                                      port=probe_port))
    else:
        def _make_probe():
            return client.V1Probe(initial_delay_seconds=15,
                                  tcp_socket=client.V1TCPSocketAction(
                                      port=probe_port))
    liveness_probe = _make_probe()
    readiness_probe = _make_probe()

    env = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        # Downward API: expose the pod's own identity to the container.
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]

    containers = [
        client.V1Container(
            name=self.dm_name,
            image=self.image,
            ports=[
                client.V1ContainerPort(container_port=int(port))
                for port in self.container_port
            ],
            image_pull_policy='Always',
            env=env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts,
            liveness_probe=liveness_probe,
            readiness_probe=readiness_probe)
    ]
    # Optional sidecar shares env, resources and mounts with the main container.
    if self.sidecar:
        containers.append(
            client.V1Container(
                name='sidecar-%s' % self.dm_name,
                image=self.sidecar,
                image_pull_policy='Always',
                env=env,
                resources=client.V1ResourceRequirements(
                    limits=self.re_limits, requests=self.re_requests),
                volume_mounts=volume_mounts))

    # Pod template: registry pull secret plus soft node affinity — prefer
    # project=moji nodes (weight 30), and prefer nodes labelled for this
    # specific deployment even more (weight 70).
    secrets = client.V1LocalObjectReference('registrysecret')
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[secrets],
            volumes=volumes,
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='project',
                                    operator='In',
                                    values=['moji'])
                            ]),
                        weight=30),
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='deploy',
                                    operator='In',
                                    values=[self.dm_name])
                            ]),
                        weight=70)
                ]))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})

    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
        self.replicas),
                                                  template=template,
                                                  selector=selector,
                                                  min_ready_seconds=3)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
def get_container(self, volume_mounts: List[client.V1VolumeMount]):
    """Build the django-app container spec for this deployment's pod."""
    # Use the dev server on minikube so static files (e.g. the admin page)
    # are served; elsewhere fall back to the image's own entrypoint.
    if self.context.minikube:
        command = ["python", "manage.py", "runserver", "0.0.0.0:8080"]
    else:
        command = None

    def ping_probe():
        # Identical readiness and liveness probe: GET /ping/ with an
        # explicit Host header so the request passes ALLOWED_HOSTS.
        return client.V1Probe(
            http_get=client.V1HTTPGetAction(
                path='/ping/',
                port=8080,
                http_headers=[
                    client.V1HTTPHeader(name='Host', value='127.0.0.1')
                ],
            ),
            period_seconds=5,
            initial_delay_seconds=5,
            failure_threshold=1,
        )

    env_vars = [
        client.V1EnvVar(
            name='DATABASE_URL',
            value='postgres://*****:*****@postgres-service:5432/dev_db',
        ),
        client.V1EnvVar(
            name='ALLOWED_HOSTS',
            value='.kangox.com,127.0.0.1,[::1],localhost',
        ),
        # Password comes from the rabbitmq secret rather than a literal.
        client.V1EnvVar(
            name='RABBITMQ_PASSWORD',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name='rabbitmq',
                    key='rabbitmq-password',
                )
            ),
        ),
    ]

    return client.V1Container(
        image=self.context.image,
        image_pull_policy='IfNotPresent',
        name='django-app',
        ports=[client.V1ContainerPort(container_port=8080)],
        command=command,
        volume_mounts=volume_mounts,
        env=env_vars,
        readiness_probe=ping_probe(),
        liveness_probe=ping_probe(),
    )