def _build_container(self, image: str, entrypoint: str) -> Container:
    """
    Build the primary Container for this HmlInferenceApp, using the
    Kubeflow ``Container`` wrapper so that we can utilise its helper
    methods.

    Args:
        image: Docker image reference the container will run.
        entrypoint: Executable used as the container command.

    Returns:
        kfp.dsl._container_op.Container
    """
    # One HTTP probe object serves as both the liveness and readiness
    # check: poll /healthz on the app's port once a minute.
    health_check = client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/healthz", port=self.port),
        initial_delay_seconds=60,
        period_seconds=60,
    )

    built = Container(
        name=f"{self.name}-container",
        image=image,
        command=[entrypoint],
        args=["inference", "start-prod"],
        ports=[client.V1ContainerPort(container_port=self.port)],
        liveness_probe=health_check,
        readiness_probe=health_check,
    )

    # The Kubeflow SDK removes the container name, so we need to add the
    # swagger attribute maps back for proper serialization.
    built.swagger_types = client.V1Container.swagger_types
    built.attribute_map = client.V1Container.attribute_map
    return built
def create_container_object(deployment_image: str, port: int, external_port: bool) -> V1Container:
    """
    Create the container object.

    :param deployment_image: The image name in the docker environment.
    :param port: port of the web_server.
    :param external_port: whether the port should be an external port.
    :return: The container object.
    """
    # Both probes hit the same /health endpoint; liveness waits slightly
    # longer before the first check than readiness does.
    liveness_probe = client.V1Probe(
        http_get=client.V1HTTPGetAction(port=port, path='/health'),
        initial_delay_seconds=3,
        failure_threshold=2,
        period_seconds=1,
    )
    readiness_probe = client.V1Probe(
        http_get=client.V1HTTPGetAction(port=port, path='/health'),
        initial_delay_seconds=2,
        failure_threshold=2,
        period_seconds=1,
    )

    # An external port is bound on the host interface; otherwise only the
    # container port is declared.
    if external_port:
        port_spec = client.V1ContainerPort(
            container_port=port,
            host_ip='0.0.0.0',
            host_port=port,
            name='prt',
            protocol='TCP',
        )
    else:
        port_spec = client.V1ContainerPort(container_port=port)

    return client.V1Container(
        name="molerelay",
        image=deployment_image,
        image_pull_policy="Never",
        ports=[port_spec],
        liveness_probe=liveness_probe,
        readiness_probe=readiness_probe,
    )
def create_pod(self, params):
    """
    Create a pod in the cluster described by a parameter dictionary.

    :param params: dict with optional keys:
        - vnf_name: pod/container name (default "testpythonclient")
        - namespace: target namespace (default 'vicsnet')
        - default_ip: value for the 'default_ip' annotation
        - network_ips: list placed in the CNI networks annotation
        - image: container image
        - command: container command list
        - env: list of {"name": ..., "value": ...} dicts
        - node_selector: hostname to pin the pod to
        - is_vnc: if truthy, add a VNC readiness probe and a privileged
          root security context
    :raises ApiException: if the Kubernetes API rejects the request.
    """
    # `x or default` is equivalent to the original `x if x else default`
    # and only calls params.get() once per key.
    vnf_name = params.get('vnf_name') or "testpythonclient"
    namespace = params.get('namespace') or 'vicsnet'
    default_ip = params.get('default_ip') or None
    network_ips = params.get('network_ips') or []
    image = params.get('image') or "192.168.103.250:5000/icn-dtn-base-0.6.5:1.0"
    command = params.get('command') or ["/bin/bash", "-c", "/root/start_vicsnf.sh; sleep 30d;"]
    envs = [self.client.V1EnvVar(e.get("name"), e.get("value"))
            for e in (params.get('env') or [])]
    node_selector = params.get('node_selector') or None
    is_vnc = params.get('is_vnc') or False

    # Create a body which stores the information of the pod to create
    body = self.client.V1Pod()

    # Pod metadata: annotations carry the default IP and the
    # Multus/CNI extra-network attachment list.
    annotations = {}
    if default_ip:
        annotations['default_ip'] = default_ip
    if len(network_ips):
        annotations['k8s.v1.cni.cncf.io/networks'] = str(network_ips)
    # CONSISTENCY FIX: use self.client like the rest of this method
    # (previously this line used the module-level `client`).
    body.metadata = self.client.V1ObjectMeta(namespace=namespace,
                                             name=vnf_name,
                                             annotations=annotations)

    # Spec: container (name, image, env, command), optional nodeSelector,
    # and — for VNC pods — a readiness probe plus a privileged root
    # security context so the VNC server can start.
    readiness_probe = None
    security_context = None
    if is_vnc:
        PORT = 6901
        PATH = "/"
        SCHEME = "HTTP"
        http_get = self.client.V1HTTPGetAction(port=PORT, path=PATH, scheme=SCHEME)
        readiness_probe = self.client.V1Probe(http_get=http_get,
                                              initial_delay_seconds=1,
                                              timeout_seconds=1)
        security_context = self.client.V1SecurityContext(run_as_user=0,
                                                         privileged=True)

    container = self.client.V1Container(command=command,
                                        image=image,
                                        env=envs,
                                        name=vnf_name,
                                        working_dir='/root',
                                        security_context=security_context,
                                        readiness_probe=readiness_probe)

    node_selector = {"kubernetes.io/hostname": node_selector} if node_selector else None
    body.spec = self.client.V1PodSpec(containers=[container],
                                      node_selector=node_selector)
    try:
        api_response = self.v1Api.create_namespaced_pod(namespace, body)
        print(api_response)
    except ApiException:
        # Surface API failures to the caller unchanged.
        raise
def create_nlu_deployment_object(image_full_name, secret_name, tag_name, mem_allocation):
    """Build an extensions/v1beta1 Deployment object for an NLU service.

    :param image_full_name: full image reference for the container.
    :param secret_name: secret used to build the env list; when None a
        single "no-nlu-variables" placeholder variable is injected.
    :param tag_name: used as deployment/container name and app label.
    :param mem_allocation: memory allocation in MiB (numeric).
    :return: the Deployment object (not yet submitted to the API).
    """
    memory = f"{int(mem_allocation)}Mi"
    mcpu = "80m"

    if secret_name is None:
        env_list = [client.V1EnvVar(name="no-nlu-variables", value="true")]
    else:
        env_list = create_env_list(secret_name)

    # Request a small CPU share; cap memory at the requested amount.
    resources = client.V1ResourceRequirements(
        requests={"memory": memory, "cpu": mcpu},
        limits={"memory": memory})

    # Liveness check against the service's /healthcheck endpoint.
    health_probe = client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/healthcheck", port=5000),
        failure_threshold=3,
        period_seconds=3,
        initial_delay_seconds=7,
        timeout_seconds=2)

    nlu_container = client.V1Container(
        name=tag_name,
        image=image_full_name,
        image_pull_policy="IfNotPresent",
        liveness_probe=health_probe,
        resources=resources,
        ports=[client.V1ContainerPort(container_port=5000)],
        env=env_list)

    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": tag_name}),
        spec=client.V1PodSpec(containers=[nlu_container]))

    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=1, template=pod_template)

    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=tag_name),
        spec=deploy_spec)
def load_liveness_readiness_probe(data):
    """Parse a YAML probe specification into a kubernetes ``V1Probe``.

    Recognised keys: ``httpGet`` (port required; path/host optional),
    ``exec`` (with ``command``), ``initialDelaySeconds``,
    ``periodSeconds``, ``timeoutSeconds``.

    :param data: YAML string describing the probe.
    :return: a populated ``client.V1Probe``.
    """
    # SECURITY FIX: yaml.load() without a Loader is deprecated and can
    # construct arbitrary Python objects; safe_load only builds plain data.
    probe = yaml.safe_load(data)

    httpGet = None
    if "httpGet" in probe:
        http_spec = probe['httpGet']
        # BUG FIX: the action must exist before path/host are attached.
        # Previously a spec with "path"/"host" but no "port" crashed with
        # AttributeError on the still-None httpGet object; port is the
        # required field, so path/host are only applied when it is set.
        if "port" in http_spec:
            httpGet = client.V1HTTPGetAction(port=int(http_spec['port']))
            if "path" in http_spec:
                httpGet.path = http_spec['path']
            if "host" in http_spec:
                httpGet.host = http_spec['host']

    execLiveness = None
    if "exec" in probe:
        if probe['exec']['command']:
            execLiveness = client.V1ExecAction(command=probe['exec']['command'])

    v1Probe = client.V1Probe()
    if httpGet:
        v1Probe.http_get = httpGet
    if execLiveness:
        # The generated client names the exec field `_exec` because
        # `exec` is a reserved word.
        v1Probe._exec = execLiveness
    if "initialDelaySeconds" in probe:
        v1Probe.initial_delay_seconds = probe["initialDelaySeconds"]
    if "periodSeconds" in probe:
        v1Probe.period_seconds = probe["periodSeconds"]
    if "timeoutSeconds" in probe:
        v1Probe.timeout_seconds = probe["timeoutSeconds"]
    return v1Probe
def _create_probe(hc, port):
    '''
    Create a Kubernetes probe based on info in the health check dictionary hc
    '''
    check_kind = hc['type']
    period = _parse_interval(hc.get('interval', PROBE_DEFAULT_PERIOD))
    timeout = _parse_interval(hc.get('timeout', PROBE_DEFAULT_TIMEOUT))

    # Timing/threshold settings shared by every probe variant.
    common_args = dict(
        failure_threshold=1,
        initial_delay_seconds=5,
        period_seconds=period,
        timeout_seconds=timeout,
    )

    if check_kind in ('http', 'https'):
        # HTTP(S) checks poll the configured endpoint on the given port.
        return client.V1Probe(
            http_get=client.V1HTTPGetAction(
                path=hc['endpoint'],
                port=port,
                scheme=check_kind.upper()),
            **common_args)

    if check_kind in ('script', 'docker'):
        # Script checks run the command inside the container.
        return client.V1Probe(
            _exec=client.V1ExecAction(command=hc['script'].split()),
            **common_args)

    # Unknown type: no probe (matches the original's None result).
    return None
def _build_deployment(self) -> ExtensionsV1beta1Deployment:
    """Build an extensions/v1beta1 Deployment running nginx on self.port.

    Returns:
        ExtensionsV1beta1Deployment: single-replica deployment named
        ``self.name`` in ``self.namespace``, probed on /healthz.
    """
    # Define our probes for when the container is ready for action
    probe_action = client.V1HTTPGetAction(path="/healthz", port=self.port)
    # BUG FIX: the kubernetes python client takes the snake_case keyword
    # `http_get`; passing `httpGet` raises TypeError at construction time
    # (compare the working probe construction elsewhere in this file).
    probe = client.V1Probe(http_get=probe_action,
                           initial_delay_seconds=60,
                           period_seconds=60)

    container = client.V1Container(
        name=self.name,
        image="nginx:1.7.9",
        ports=[client.V1ContainerPort(container_port=self.port)],
        liveness_probe=probe,
        readiness_probe=probe,
    )

    # Create and configurate a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": self.name}),
        spec=client.V1PodSpec(containers=[container]),
    )

    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1, template=template)

    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.name, namespace=self.namespace),
        spec=spec,
    )

    return deployment
def export_deployment(self):
    """Build an extensions/v1beta1 Deployment for this service.

    Uses instance state: dm_name, image, container_port (list; first
    entry is used for probes), mounts (dict path -> volume name),
    healthcheck (HTTP path or falsy), sidecar (image or falsy),
    re_limits/re_requests, replicas.

    Returns the Deployment object; nothing is submitted to the API here.
    """
    # Configureate Pod template container
    volume_mounts = []
    containers = []
    volumes = []
    # Always mount the host's /opt/logs into the container at /docker/logs.
    volume_mounts.append(
        client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
    volumes.append(
        client.V1Volume(name='logs',
                        host_path=client.V1HostPathVolumeSource(
                            path='/opt/logs', type='DirectoryOrCreate')))
    # Extra hostPath mounts: self.mounts maps mount path -> volume name.
    if self.mounts:
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(name=self.mounts[path],
                                host_path=client.V1HostPathVolumeSource(
                                    path=path, type='DirectoryOrCreate')))
    # Default probes: TCP connect against the first container port.
    liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                    tcp_socket=client.V1TCPSocketAction(
                                        port=int(self.container_port[0])))
    readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                     tcp_socket=client.V1TCPSocketAction(
                                         port=int(self.container_port[0])))
    # If an HTTP healthcheck path is configured, prefer HTTP GET probes.
    if self.healthcheck:
        liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                        http_get=client.V1HTTPGetAction(
                                            path=self.healthcheck,
                                            port=int(
                                                self.container_port[0])))
        readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                         http_get=client.V1HTTPGetAction(
                                             path=self.healthcheck,
                                             port=int(
                                                 self.container_port[0])))
    # Locale plus downward-API variables exposing pod name and IP.
    Env = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]
    container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ],
        image_pull_policy='Always',
        env=Env,
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=volume_mounts,
        liveness_probe=liveness_probe,
        readiness_probe=readiness_probe)
    containers.append(container)
    # Optional sidecar shares env, resources and mounts, but has no ports
    # or probes of its own.
    if self.sidecar:
        sidecar_container = client.V1Container(
            name='sidecar-%s' % self.dm_name,
            image=self.sidecar,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts)
        containers.append(sidecar_container)
    # Create and configurate a spec section
    secrets = client.V1LocalObjectReference('registrysecret')
    # Soft node affinity: prefer nodes labeled project=moji (weight 30)
    # and, more strongly, nodes labeled deploy=<dm_name> (weight 70).
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[secrets],
            volumes=volumes,
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='project',
                                    operator='In',
                                    values=['moji'])
                            ]),
                        weight=30),
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='deploy',
                                    operator='In',
                                    values=[self.dm_name])
                            ]),
                        weight=70)
                ]))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
        self.replicas), template=template, selector=selector,
        min_ready_seconds=3)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
    return deployment
def test_sanitize_k8s_container_attribute(self):
    """Exercise sanitize_k8s_object on a ContainerOp's container.

    Three phases: (1) build a container whose k8s attributes all have
    wrong types, sanitize, and assert the resulting attribute types;
    (2) check concrete values after sanitization; (3) check that
    un-coercible values raise ValueError.
    """
    # test cases for implicit type sanitization(conversion)
    op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
                         arguments=['echo test | tee /tmp/message.txt'],
                         file_outputs={'merged': '/tmp/message.txt'})
    # Every argument below deliberately uses the WRONG type (ints where
    # strings belong, strings where bools/ints belong) so the sanitizer
    # has something to convert.
    op.container \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/secret/gcp-credentials',
            name='gcp-credentials')) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=80)) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value_from=k8s_client.V1EnvVarSource(
                config_map_key_ref=k8s_client.V1ConfigMapKeySelector(key=80, name=8080, optional='False'),
                field_ref=k8s_client.V1ObjectFieldSelector(api_version=80, field_path=8080),
                resource_field_ref=k8s_client.V1ResourceFieldSelector(container_name=80, divisor=8080, resource=8888),
                secret_key_ref=k8s_client.V1SecretKeySelector(key=80, name=8080, optional='False')
            )
        )) \
        .add_env_from(k8s_client.V1EnvFromSource(
            config_map_ref=k8s_client.V1ConfigMapEnvSource(name=80, optional='True'),
            prefix=999
        )) \
        .add_env_from(k8s_client.V1EnvFromSource(
            secret_ref=k8s_client.V1SecretEnvSource(name=80, optional='True'),
            prefix=888
        )) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=111,
            mount_propagation=222,
            name=333,
            read_only='False',
            sub_path=444,
            sub_path_expr=555
        )) \
        .add_volume_devices(k8s_client.V1VolumeDevice(
            device_path=111,
            name=222
        )) \
        .add_port(k8s_client.V1ContainerPort(
            container_port='8080',
            host_ip=111,
            host_port='8888',
            name=222,
            protocol=333
        )) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='True',
            capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
            privileged='False',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22)
        )) \
        .set_stdin(stdin='False') \
        .set_stdin_once(stdin_once='False') \
        .set_termination_message_path(termination_message_path=111) \
        .set_tty(tty='False') \
        .set_readiness_probe(readiness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777'
        )) \
        .set_liveness_probe(liveness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777'
        )) \
        .set_lifecycle(lifecycle=k8s_client.V1Lifecycle(
            post_start=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
            ),
            pre_stop=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
            )
        ))

    sanitize_k8s_object(op.container)

    # After sanitization every field should have its swagger-declared type.
    for e in op.container.env:
        self.assertIsInstance(e.name, str)
        if e.value:
            self.assertIsInstance(e.value, str)
        if e.value_from:
            if e.value_from.config_map_key_ref:
                self.assertIsInstance(e.value_from.config_map_key_ref.key, str)
                if e.value_from.config_map_key_ref.name:
                    self.assertIsInstance(e.value_from.config_map_key_ref.name, str)
                if e.value_from.config_map_key_ref.optional:
                    self.assertIsInstance(e.value_from.config_map_key_ref.optional, bool)
            if e.value_from.field_ref:
                self.assertIsInstance(e.value_from.field_ref.field_path, str)
                if e.value_from.field_ref.api_version:
                    self.assertIsInstance(e.value_from.field_ref.api_version, str)
            if e.value_from.resource_field_ref:
                self.assertIsInstance(e.value_from.resource_field_ref.resource, str)
                if e.value_from.resource_field_ref.container_name:
                    self.assertIsInstance(e.value_from.resource_field_ref.container_name, str)
                if e.value_from.resource_field_ref.divisor:
                    self.assertIsInstance(e.value_from.resource_field_ref.divisor, str)
            if e.value_from.secret_key_ref:
                self.assertIsInstance(e.value_from.secret_key_ref.key, str)
                if e.value_from.secret_key_ref.name:
                    self.assertIsInstance(e.value_from.secret_key_ref.name, str)
                if e.value_from.secret_key_ref.optional:
                    self.assertIsInstance(e.value_from.secret_key_ref.optional, bool)

    for e in op.container.env_from:
        if e.prefix:
            self.assertIsInstance(e.prefix, str)
        if e.config_map_ref:
            if e.config_map_ref.name:
                self.assertIsInstance(e.config_map_ref.name, str)
            if e.config_map_ref.optional:
                self.assertIsInstance(e.config_map_ref.optional, bool)
        if e.secret_ref:
            if e.secret_ref.name:
                self.assertIsInstance(e.secret_ref.name, str)
            if e.secret_ref.optional:
                self.assertIsInstance(e.secret_ref.optional, bool)

    for e in op.container.volume_mounts:
        if e.mount_path:
            self.assertIsInstance(e.mount_path, str)
        if e.mount_propagation:
            self.assertIsInstance(e.mount_propagation, str)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.read_only:
            self.assertIsInstance(e.read_only, bool)
        if e.sub_path:
            self.assertIsInstance(e.sub_path, str)
        if e.sub_path_expr:
            self.assertIsInstance(e.sub_path_expr, str)

    for e in op.container.volume_devices:
        if e.device_path:
            self.assertIsInstance(e.device_path, str)
        if e.name:
            self.assertIsInstance(e.name, str)

    for e in op.container.ports:
        if e.container_port:
            self.assertIsInstance(e.container_port, int)
        if e.host_ip:
            self.assertIsInstance(e.host_ip, str)
        if e.host_port:
            self.assertIsInstance(e.host_port, int)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.protocol:
            self.assertIsInstance(e.protocol, str)

    if op.container.security_context:
        e = op.container.security_context
        if e.allow_privilege_escalation:
            self.assertIsInstance(e.allow_privilege_escalation, bool)
        if e.capabilities:
            for a in e.capabilities.add:
                self.assertIsInstance(a, str)
            for d in e.capabilities.drop:
                self.assertIsInstance(d, str)
        if e.privileged:
            self.assertIsInstance(e.privileged, bool)
        if e.proc_mount:
            self.assertIsInstance(e.proc_mount, str)
        if e.read_only_root_filesystem:
            self.assertIsInstance(e.read_only_root_filesystem, bool)
        if e.run_as_group:
            self.assertIsInstance(e.run_as_group, int)
        if e.run_as_non_root:
            self.assertIsInstance(e.run_as_non_root, bool)
        if e.run_as_user:
            self.assertIsInstance(e.run_as_user, int)
        if e.se_linux_options:
            if e.se_linux_options.level:
                self.assertIsInstance(e.se_linux_options.level, str)
            if e.se_linux_options.role:
                self.assertIsInstance(e.se_linux_options.role, str)
            if e.se_linux_options.type:
                self.assertIsInstance(e.se_linux_options.type, str)
            if e.se_linux_options.user:
                self.assertIsInstance(e.se_linux_options.user, str)
        if e.windows_options:
            if e.windows_options.gmsa_credential_spec:
                self.assertIsInstance(e.windows_options.gmsa_credential_spec, str)
            if e.windows_options.gmsa_credential_spec_name:
                self.assertIsInstance(e.windows_options.gmsa_credential_spec_name, str)

    if op.container.stdin:
        self.assertIsInstance(op.container.stdin, bool)
    if op.container.stdin_once:
        self.assertIsInstance(op.container.stdin_once, bool)
    if op.container.termination_message_path:
        self.assertIsInstance(op.container.termination_message_path, str)
    if op.container.tty:
        self.assertIsInstance(op.container.tty, bool)

    for e in [op.container.readiness_probe, op.container.liveness_probe]:
        if e:
            if e._exec:
                for c in e._exec.command:
                    self.assertIsInstance(c, str)
            if e.failure_threshold:
                self.assertIsInstance(e.failure_threshold, int)
            if e.http_get:
                if e.http_get.host:
                    self.assertIsInstance(e.http_get.host, str)
                if e.http_get.http_headers:
                    for h in e.http_get.http_headers:
                        if h.name:
                            self.assertIsInstance(h.name, str)
                        if h.value:
                            self.assertIsInstance(h.value, str)
                if e.http_get.path:
                    self.assertIsInstance(e.http_get.path, str)
                if e.http_get.port:
                    self.assertIsInstance(e.http_get.port, (str, int))
                if e.http_get.scheme:
                    self.assertIsInstance(e.http_get.scheme, str)
            if e.initial_delay_seconds:
                self.assertIsInstance(e.initial_delay_seconds, int)
            if e.period_seconds:
                self.assertIsInstance(e.period_seconds, int)
            if e.success_threshold:
                self.assertIsInstance(e.success_threshold, int)
            if e.tcp_socket:
                if e.tcp_socket.host:
                    self.assertIsInstance(e.tcp_socket.host, str)
                if e.tcp_socket.port:
                    self.assertIsInstance(e.tcp_socket.port, (str, int))
            if e.timeout_seconds:
                self.assertIsInstance(e.timeout_seconds, int)

    if op.container.lifecycle:
        for e in [op.container.lifecycle.post_start, op.container.lifecycle.pre_stop]:
            if e:
                if e._exec:
                    for c in e._exec.command:
                        self.assertIsInstance(c, str)
                if e.http_get:
                    if e.http_get.host:
                        self.assertIsInstance(e.http_get.host, str)
                    if e.http_get.http_headers:
                        for h in e.http_get.http_headers:
                            if h.name:
                                self.assertIsInstance(h.name, str)
                            if h.value:
                                self.assertIsInstance(h.value, str)
                    if e.http_get.path:
                        self.assertIsInstance(e.http_get.path, str)
                    if e.http_get.port:
                        self.assertIsInstance(e.http_get.port, (str, int))
                    if e.http_get.scheme:
                        self.assertIsInstance(e.http_get.scheme, str)
                if e.tcp_socket:
                    if e.tcp_socket.host:
                        self.assertIsInstance(e.tcp_socket.host, str)
                    if e.tcp_socket.port:
                        self.assertIsInstance(e.tcp_socket.port, (str, int))

    # test cases for checking value after sanitization
    check_value_op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
                                     arguments=['echo test | tee /tmp/message.txt'],
                                     file_outputs={'merged': '/tmp/message.txt'})
    check_value_op.container \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=8080)) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='true',
            capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
            privileged='false',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22)
        ))

    sanitize_k8s_object(check_value_op.container)

    self.assertEqual(check_value_op.container.env[0].name, '80')
    self.assertEqual(check_value_op.container.env[0].value, '8080')
    self.assertEqual(check_value_op.container.security_context.allow_privilege_escalation, True)
    self.assertEqual(check_value_op.container.security_context.capabilities.add[0], '11')
    self.assertEqual(check_value_op.container.security_context.capabilities.add[1], '22')
    self.assertEqual(check_value_op.container.security_context.capabilities.drop[0], '33')
    self.assertEqual(check_value_op.container.security_context.capabilities.drop[1], '44')
    self.assertEqual(check_value_op.container.security_context.privileged, False)
    self.assertEqual(check_value_op.container.security_context.proc_mount, '111')
    self.assertEqual(check_value_op.container.security_context.read_only_root_filesystem, False)
    self.assertEqual(check_value_op.container.security_context.run_as_group, 222)
    self.assertEqual(check_value_op.container.security_context.run_as_non_root, True)
    self.assertEqual(check_value_op.container.security_context.run_as_user, 333)
    self.assertEqual(check_value_op.container.security_context.se_linux_options.level, '11')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.role, '22')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.type, '33')
    self.assertEqual(check_value_op.container.security_context.se_linux_options.user, '44')
    self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec, '11')
    self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec_name, '22')

    # test cases for exception
    with self.assertRaises(ValueError, msg='Invalid boolean string 2. Should be boolean.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation=1
            ))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(ValueError, msg='Invalid boolean string Test. Should be "true" or "false".'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation='Test'
            ))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(ValueError, msg='Invalid test. Should be integer.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                run_as_group='test',
            ))
        sanitize_k8s_object(exception_op.container)
def create_deploy():
    """Flask view: create a Kubernetes Deployment from a JSON POST body.

    Validates the posted fields, optionally builds tcp/http probes,
    renders the deployment to demo-deployment.yaml for inspection, and
    submits it via the AppsV1 API.

    Returns:
        On validation failure: JSON {"error": 1002, "msg": ...}.
        On success: the deployment serialized as JSON.
        For non-POST requests: JSON {'a': 1}.
    """
    error = ""
    if request.method == "POST":
        data = json.loads(request.get_data().decode("utf-8"))
        project = data.get("project").strip()
        environment = data.get("environment").strip()
        cluster = data.get("cluster").strip()
        imageRepo = data.get("imageRepo").strip()
        imageName = data.get("imageName").strip()
        imageTag = data.get("imageTag").strip()
        imagePullPolicy = data.get("imagePullPolicy").strip()
        imagePullSecret = data.get("imagePullSecret").strip()
        containerPort = str_to_int(data.get("containerPort").strip())
        replicas = data.get("replicas").strip()
        cpu = data.get("cpu").strip()
        memory = data.get("memory").strip()
        label_key1 = data.get("label_key1").strip()
        label_value1 = data.get("label_value1").strip()
        label_key2 = data.get("label_key2").strip()
        label_value2 = data.get("label_value2").strip()
        env = data.get("env").strip()
        volumeMount = data.get("volumeMount").strip()
        updateType = data.get("updateType").strip()
        probeType = data.get("probeType").strip()
        healthCheck = data.get("healthCheck").strip()
        healthPath = data.get("healthPath").strip()
        initialDelaySeconds = str_to_int(
            data.get("initialDelaySeconds").strip())
        periodSeconds = str_to_int(data.get("periodSeconds").strip())
        failureThreshold = str_to_int(data.get("failureThreshold").strip())
        healthTimeout = str_to_int(data.get("healthTimeout").strip())
        healthCmd = data.get("healthCmd").strip()

        # Build identical liveness/readiness probes for tcp or http checks;
        # "cmd" probes are not implemented yet.
        liveness_probe = None
        readiness_probe = None
        if (healthCheck == "true"):
            if (probeType == "tcp"):
                liveness_probe = client.V1Probe(
                    initial_delay_seconds=initialDelaySeconds,
                    period_seconds=periodSeconds,
                    timeout_seconds=healthTimeout,
                    failure_threshold=failureThreshold,
                    tcp_socket=client.V1TCPSocketAction(port=containerPort))
                readiness_probe = liveness_probe
            elif (probeType == "http"):
                liveness_probe = client.V1Probe(
                    initial_delay_seconds=initialDelaySeconds,
                    period_seconds=periodSeconds,
                    timeout_seconds=healthTimeout,
                    failure_threshold=failureThreshold,
                    http_get=client.V1HTTPGetAction(path=healthPath,
                                                    port=containerPort))
                readiness_probe = liveness_probe
            elif (probeType == "cmd"):
                pass
            else:
                pass

        # Validation: str_to_int maps an empty port to 1 (sentinel).
        if (containerPort == 1):
            error = "容器端口不能为空"
        if (imageRepo == "" or project == "" or environment == ""
                or imageName == "" or imageTag == ""):
            error = "镜像相关不能为空"
        if (label_key1 == "" or label_value1 == ""):
            error = "label相关数据不能为空(至少输入一对key/value)"
        replicas = str_to_int(replicas)
        cpu = int(1000 * (str_to_float(cpu)))      # cores -> millicores
        memory = int(1024 * (str_to_float(memory)))  # GiB -> MiB
        if (error != ""):
            return jsonify({"error": 1002, "msg": error})
        #ms-dev
        namespace = project + "-" + environment
        # myhub.mydocker.com/ms-dev/base:v1.0
        image = imageRepo + "/" + project + "-" + environment + "/" + imageName + ":" + imageTag
        labels = {label_key1: label_value1}
        if (label_key2 != "" and label_value2 != ""):
            labels[label_key2] = label_value2
        myclient = client.AppsV1Api()
        deployment = create_deployment_object(
            name=imageName, namespace=namespace, image=image,
            port=containerPort, image_pull_policy=imagePullPolicy,
            imagePullSecret=imagePullSecret, labels=labels,
            replicas=replicas, cpu=cpu, memory=memory,
            liveness_probe=liveness_probe, readiness_probe=readiness_probe)
        # SECURITY FIX: yaml.load without an explicit Loader is deprecated
        # and unsafe; safe_load suffices since the input is JSON data.
        to_yaml = yaml.safe_load(json.dumps(deployment, indent=4, cls=MyEncoder))
        file = os.path.join(dir_path, "demo-deployment.yaml")
        # BUG FIX: the file handle was opened but never closed (resource
        # leak); a context manager guarantees it is flushed and closed.
        with open(file, 'w') as stream:
            yaml.safe_dump(to_yaml, stream, default_flow_style=False)
        status = create_deployment(api_instance=myclient,
                                   namespace=namespace,
                                   deployment=deployment)
        return json.dumps(deployment, indent=4, cls=MyEncoder)
    return jsonify({'a': 1})
def generate_pod():
    """Build a fully-populated V1Pod object with hard-coded fixture data
    (metadata, one app container with probes, volumes, tolerations).

    Returns:
        client.V1Pod: the constructed pod; nothing is sent to any API.
    """
    # Metadata mirrors a Helm-managed ReplicaSet-owned pod.
    metadata = client.V1ObjectMeta(
        name="platform-app-958795556-2nqgj",
        namespace="production",
        generate_name="platform-app-958795556-",
        labels={
            "app": "platform",
            "chart": "platform",
            "component": "app",
            "heritage": "Helm",
            "pod-template-hash": "958795556",
            "release": "platform-production",
            "version": "1.0.3",
        },
        owner_references=[
            client.V1OwnerReference(
                api_version="apps/v1",
                kind="ReplicaSet",
                name="platform-app-958795556",
                uid="35ba938b-681d-11eb-a74a-16e1a04d726b",
                controller=True,
                block_owner_deletion=True,
            )
        ],
    )
    container = client.V1Container(
        name="app",
        image="platform.azurecr.io/app:master",
        image_pull_policy="Always",
        termination_message_policy="File",
        termination_message_path="/dev/termination-log",
        env=[],
        resources=client.V1ResourceRequirements(
            limits={
                "cpu": "1200m",
                "memory": "1Gi"
            },
            requests={
                "cpu": "1",
                "memory": "768Mi"
            },
        ),
        ports=[client.V1ContainerPort(container_port=3000, protocol="TCP")],
        volume_mounts=[
            client.V1VolumeMount(
                name="default-token-2cg25",
                read_only=True,
                mount_path="/var/run/secrets/kubernetes.io/serviceaccount",
            )
        ],
        liveness_probe=client.V1Probe(
            initial_delay_seconds=10,
            timeout_seconds=5,
            period_seconds=10,
            success_threshold=1,
            failure_threshold=6,
            http_get=client.V1HTTPGetAction(path="/health/liveness",
                                            port=3000,
                                            scheme="HTTP"),
        ),
        # NOTE(review): "/health/readness" looks like a typo for
        # "readiness" — confirm against the actual service routes before
        # changing; the string is preserved as-is here.
        readiness_probe=client.V1Probe(
            initial_delay_seconds=10,
            timeout_seconds=5,
            period_seconds=10,
            success_threshold=2,
            failure_threshold=6,
            http_get=client.V1HTTPGetAction(path="/health/readness",
                                            port=3000,
                                            scheme="HTTP"),
        ),
    )
    spec = client.V1PodSpec(
        containers=[container],
        volumes=[
            client.V1Volume(
                name="default-token-2cg25",
                secret=client.V1SecretVolumeSource(
                    secret_name="default-token-2cg25", default_mode=420),
            )
        ],
        restart_policy="Always",
        termination_grace_period_seconds=30,
        dns_policy="ClusterFirst",
        service_account_name="default",
        service_account="default",
        node_name="aks-agentpool-26722002-vmss00039t",
        security_context=client.V1PodSecurityContext(run_as_user=1000,
                                                     fs_group=1000),
        scheduler_name="default-scheduler",
        # Standard not-ready/unreachable tolerations with 5-minute grace.
        tolerations=[
            client.V1Toleration(
                key="node.kubernetes.io/not-ready",
                operator="Exists",
                effect="NoExecute",
                toleration_seconds=300,
            ),
            client.V1Toleration(
                key="node.kubernetes.io/unreachable",
                operator="Exists",
                effect="NoExecute",
                toleration_seconds=300,
            ),
        ],
        priority=0,
        enable_service_links=True,
    )
    return client.V1Pod(metadata=metadata, spec=spec)
def export_deployment(self):
    """Build an extensions/v1beta1 Deployment object for this service.

    Assembles host-path volumes/mounts (always including the
    /docker/logs -> /opt/logs log volume plus any mounts from
    ``self.mounts``), container ports with TCP- or HTTP-based probes,
    an optional sidecar container, host aliases resolved from the
    ``docker_hosts`` table (staged through Redis), and node-affinity
    terms derived from ``self.labels``.

    :returns: ``client.ExtensionsV1beta1Deployment`` ready to submit to
        the Kubernetes API.
    """
    # Base log volume; extra host-path mounts come from self.mounts
    # (mapping of container path -> volume name).
    volume_mounts = [
        client.V1VolumeMount(mount_path='/docker/logs', name='logs')
    ]
    volumes = [
        client.V1Volume(
            name='logs',
            host_path=client.V1HostPathVolumeSource(
                path='/opt/logs', type='DirectoryOrCreate'))
    ]
    if self.mounts:
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(
                    name=self.mounts[path],
                    host_path=client.V1HostPathVolumeSource(
                        path=path, type='DirectoryOrCreate')))

    # Ports and probes: default to a TCP check on the first declared
    # port; an HTTP check on the same port takes precedence when a
    # healthcheck path is configured.
    ports = []
    liveness_probe = None
    readiness_probe = None
    if self.container_port:
        ports = [
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ]
        probe_port = int(self.container_port[0])

        def _make_probe():
            # Separate probe objects for liveness/readiness so the two
            # fields never alias the same model instance.
            if self.healthcheck:
                return client.V1Probe(
                    initial_delay_seconds=15,
                    http_get=client.V1HTTPGetAction(
                        path=self.healthcheck, port=probe_port))
            return client.V1Probe(
                initial_delay_seconds=15,
                tcp_socket=client.V1TCPSocketAction(port=probe_port))

        liveness_probe = _make_probe()
        readiness_probe = _make_probe()

    env = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        # Expose pod identity to the application via the downward API.
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]
    resources = client.V1ResourceRequirements(limits=self.re_limits,
                                              requests=self.re_requests)

    # FIX: the original constructed the main container twice (first
    # without probes, then rebuilt with them); build it once and attach
    # the probes only when both are available.
    probe_kwargs = {}
    if liveness_probe and readiness_probe:
        probe_kwargs = dict(liveness_probe=liveness_probe,
                            readiness_probe=readiness_probe)
    containers = [
        client.V1Container(name=self.dm_name,
                           image=self.image,
                           ports=ports,
                           image_pull_policy='Always',
                           env=env,
                           resources=resources,
                           volume_mounts=volume_mounts,
                           **probe_kwargs)
    ]
    if self.sidecar:
        containers.append(
            client.V1Container(name='sidecar-%s' % self.dm_name,
                               image=self.sidecar,
                               image_pull_policy='Always',
                               env=env,
                               resources=resources,
                               volume_mounts=volume_mounts))

    # Resolve host aliases recorded for this deployment/context. The
    # hostname lists are staged through Redis keyed by IP so multiple
    # hostnames per IP collapse into a single V1HostAlias entry.
    host_aliases = []
    db_docker_hosts = db_op.docker_hosts
    values = db_docker_hosts.query.with_entities(
        db_docker_hosts.ip, db_docker_hosts.hostname).filter(
            and_(db_docker_hosts.deployment == self.dm_name,
                 db_docker_hosts.context == self.context)).all()
    db_op.DB.session.remove()
    if values:
        ips = []
        for value in values:
            try:
                ip, hostname = value
                Redis.lpush("op_docker_hosts_%s" % ip, hostname)
                ips.append(ip)
            except Exception as e:
                logging.error(e)
        for ip in set(ips):
            try:
                key = "op_docker_hosts_%s" % ip
                if Redis.exists(key):
                    hostnames = Redis.lrange(key, 0, -1)
                    if hostnames:
                        host_aliases.append(
                            client.V1HostAlias(hostnames=hostnames, ip=ip))
                    Redis.delete(key)
            except Exception as e:
                logging.error(e)

    # Node affinity: prefer nodes labelled with the deploy key, require
    # nodes whose 'project' label matches.
    preference_key = self.dm_name
    project_values = ['xxxx']
    if self.labels:
        if 'deploy' in self.labels:
            preference_key = self.labels['deploy']
        if 'project' in self.labels:
            project_values = [self.labels['project']]

    affinity = client.V1Affinity(node_affinity=client.V1NodeAffinity(
        preferred_during_scheduling_ignored_during_execution=[
            client.V1PreferredSchedulingTerm(
                preference=client.V1NodeSelectorTerm(
                    match_expressions=[
                        client.V1NodeSelectorRequirement(
                            key=preference_key,
                            operator='In',
                            values=['mark'])
                    ]),
                weight=100)
        ],
        required_during_scheduling_ignored_during_execution=client.
        V1NodeSelector(node_selector_terms=[
            client.V1NodeSelectorTerm(match_expressions=[
                client.V1NodeSelectorRequirement(
                    key='project',
                    operator='In',
                    values=project_values)
            ])
        ])))

    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[
                client.V1LocalObjectReference('registrysecret')
            ],
            volumes=volumes,
            host_aliases=host_aliases,
            affinity=affinity))

    spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=template,
        selector=client.V1LabelSelector(
            match_labels={"project": self.dm_name}),
        min_ready_seconds=3)

    # NOTE(review): extensions/v1beta1 Deployment is removed in
    # Kubernetes >= 1.16; migrating to apps/v1 would change what this
    # method returns, so the legacy API is kept as-is here.
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
def generate_stateful_set(self):
    """Build a v1beta1 StatefulSet running a TLS-terminated pull-through
    Docker registry (registry:2.6.0) backed by an "image-store"
    persistent volume claim.

    Applies defaults to the caller-supplied volume claim spec
    (ReadWriteOnce access, 20Gi storage), wires in the TLS secret and
    optional CA-bundle config map, and prepends an init container that
    sanity-checks the registry scheduler state file.

    :returns: client.V1beta1StatefulSet ready for submission.
    """
    # Start from the caller-provided claim spec and fill in defaults
    # only where the caller left fields unset.
    volume_claim_spec = client.V1PersistentVolumeClaimSpec(**self.volume_claim_spec)
    if not volume_claim_spec.access_modes:
        volume_claim_spec.access_modes = ["ReadWriteOnce"]
    if not volume_claim_spec.resources:
        volume_claim_spec.resources = client.V1ResourceRequirements(
            requests={"storage": "20Gi"}
        )
    stateful_set = client.V1beta1StatefulSet(
        metadata=self.metadata,
        spec=client.V1beta1StatefulSetSpec(
            # we can't update service name or pod management policy
            service_name=self.full_name + "-headless",
            pod_management_policy="Parallel",
            # we can't update volume claim templates
            volume_claim_templates=[client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(
                    name="image-store",
                ),
                spec=volume_claim_spec,
            )]
        )
    )
    # Fixed at two replicas; remaining spec fields are attached below.
    stateful_set.spec.replicas = 2
    pod_labels = {'component': 'registry'}
    pod_labels.update(self.labels)
    volumes = []
    # Optional CA bundle is mounted from a config map of the same name.
    if self.ca_certificate_bundle:
        volumes = [
            client.V1Volume(
                name=self.ca_certificate_bundle,
                config_map=client.V1ConfigMapVolumeSource(
                    name=self.ca_certificate_bundle
                )
            )
        ]
    # TLS key/cert for the registry endpoint come from a secret.
    volumes.append(
        client.V1Volume(
            name="tls",
            secret=client.V1SecretVolumeSource(
                secret_name=self.docker_certificate_secret
            ),
        )
    )
    volumes_to_mount = [
        client.V1VolumeMount(
            name="image-store",
            mount_path="/var/lib/registry"
        ),
        client.V1VolumeMount(
            name="tls",
            mount_path="/etc/registry-certs",
            read_only=True
        )
    ]
    if self.ca_certificate_bundle:
        volumes_to_mount.append(
            client.V1VolumeMount(
                name=self.ca_certificate_bundle,
                mount_path="/etc/ssl/certs",
                read_only=True
            )
        )
    # Registry configuration via environment: proxy upstream, HTTPS
    # listener on :5000, debug listener on localhost:6000.
    env = [client.V1EnvVar(name="REGISTRY_PROXY_REMOTEURL",
                           value="https://" + self.upstreamUrl),
           client.V1EnvVar(name="REGISTRY_HTTP_ADDR",
                           value=":5000"),
           client.V1EnvVar(name="REGISTRY_HTTP_DEBUG_ADDR",
                           value="localhost:6000"),
           client.V1EnvVar(name="REGISTRY_HTTP_TLS_CERTIFICATE",
                           value="/etc/registry-certs/tls.crt"),
           client.V1EnvVar(name="REGISTRY_HTTP_TLS_KEY",
                           value="/etc/registry-certs/tls.key"),
           client.V1EnvVar(name="REGISTRY_LOG_ACCESSLOG_DISABLED",
                           value="true"),
           client.V1EnvVar(name="REGISTRY_LOG_FORMATTER",
                           value="logstash"),
           client.V1EnvVar(name="REGISTRY_STORAGE_DELETE_ENABLED",
                           value="true"),
           client.V1EnvVar(name="REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY",
                           value="/var/lib/registry")
           ]
    # Lets subclass/helper inject proxy credentials into the env list.
    env = self.handle_proxy_credentials(env)
    stateful_set.spec.template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(
            labels=pod_labels
        ),
        spec=client.V1PodSpec(
            init_containers=[
                # Drops the scheduler state file when it is too small or
                # is not valid JSON, so the registry starts clean.
                # NOTE(review): the script uses `[[ ]]` under `sh`; this
                # relies on busybox ash accepting it — confirm on the
                # python:3.6-alpine image.
                client.V1Container(
                    name="validate-state-file",
                    image="python:3.6-alpine",
                    env=[
                        client.V1EnvVar(
                            name="STATE_FILE",
                            value="/var/lib/registry/scheduler-state.json"
                        ),
                        client.V1EnvVar(
                            name="LOWER_LIMIT",
                            value="1024"
                        ),
                    ],
                    volume_mounts=[
                        client.V1VolumeMount(
                            name="image-store",
                            mount_path="/var/lib/registry"
                        )
                    ],
                    command=[
                        "sh", "-e", "-c",
                        "touch $STATE_FILE; if [[ $(stat -c \"%s\" $STATE_FILE) -lt $LOWER_LIMIT ]]; then rm -f $STATE_FILE; else cat $STATE_FILE | python -m json.tool > /dev/null 2>&1 || rm -f $STATE_FILE; fi"  # noqa
                    ]
                )
            ],
            containers=[
                client.V1Container(
                    name="registry",
                    image="registry:2.6.0",
                    env=env,
                    readiness_probe=client.V1Probe(
                        http_get=client.V1HTTPGetAction(
                            path="/",
                            port=5000,
                            scheme="HTTPS"
                        ),
                        initial_delay_seconds=3,
                        period_seconds=3
                    ),
                    ports=[client.V1ContainerPort(
                        container_port=5000,
                        name="https"
                    )],
                    resources=client.V1ResourceRequirements(
                        requests={"cpu": "0.1", "memory": "500Mi"},
                        limits={"cpu": "0.5", "memory": "500Mi"}
                    ),
                    volume_mounts=volumes_to_mount,
                )
            ],
            termination_grace_period_seconds=10,
            volumes=volumes,
        )
    )
    stateful_set.spec.update_strategy = client.V1beta1StatefulSetUpdateStrategy(
        type="RollingUpdate",)
    return stateful_set
def specifications(self):
    """
    Set the deployment specifications.

    :returns: The deployment specifications.
    :rtype: client.ExtensionsV1beta1Deployment
    """
    spec_def = self.definition

    # Container ports exactly as declared in the definition.
    container_ports = [
        client.V1ContainerPort(container_port=p['number'],
                               protocol=p['protocol'])
        for p in spec_def['ports']
    ]

    # Resources are currently hard-coded (requests == limits).
    quota = {'cpu': '100m', 'memory': '100Mi'}
    requirements = client.V1ResourceRequirements(limits=quota,
                                                 requests=quota)

    # Build readiness/liveness probes from the healthcheck entries;
    # the last entry of each type wins.
    readiness_probe = None
    liveness_probe = None
    for check in spec_def['healthchecks']:
        exec_action = None
        tcp_action = None
        http_action = None
        kind = check['command']
        if kind == 'COMMAND':
            exec_action = client.V1ExecAction(command=check['value'])
        elif kind == 'TCP':
            tcp_action = client.V1TCPSocketAction(port=check['port'])
        else:
            # Any other command value is treated as the HTTP(S) scheme.
            http_action = client.V1HTTPGetAction(path=check['path'],
                                                 port=check['port'],
                                                 scheme=kind)
        probe = client.V1Probe(
            failure_threshold=spec_def['failure_threshold'],
            initial_delay_seconds=spec_def['initial_delay_seconds'],
            period_seconds=spec_def['interval_seconds'],
            success_threshold=spec_def['success_threshold'],
            timeout_seconds=spec_def['timeout_seconds'],
            _exec=exec_action,
            tcp_socket=tcp_action,
            http_get=http_action
        )
        if check['type'] == 'readiness':
            readiness_probe = probe
        else:
            liveness_probe = probe

    # Lock the container down: no privilege escalation, non-root user.
    security = client.V1SecurityContext(allow_privilege_escalation=False,
                                        run_as_non_root=True)

    # Environment is baked into the image at build time, hence env=None.
    container = client.V1Container(
        name=spec_def['repo'],
        image=spec_def['tag'],
        ports=container_ports,
        resources=requirements,
        readiness_probe=readiness_probe,
        liveness_probe=liveness_probe,
        security_context=security,
        env=None)

    # Pod template carrying the repo/owner/branch identity labels.
    template = client.V1PodTemplateSpec(
        client.V1ObjectMeta(labels={"name": spec_def['repo'],
                                    "repo": spec_def['repo'],
                                    "owner": spec_def['owner'],
                                    "branch": spec_def['branch'],
                                    "fullname": spec_def['name']}),
        spec=client.V1PodSpec(containers=[container]))

    # Rolling update: at most one extra pod, at most one unavailable.
    rollout = client.ExtensionsV1beta1DeploymentStrategy(
        type='RollingUpdate',
        rolling_update=client.ExtensionsV1beta1RollingUpdateDeployment(
            max_surge=1,
            max_unavailable=1
        )
    )

    # Replica count is pinned to 1 for now.
    deployment_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=1,
        strategy=rollout,
        template=template)

    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(
            name=spec_def['repo'],
            labels={"uid": spec_def['uid'],
                    "repo": spec_def['repo'],
                    "owner": spec_def['owner'],
                    "branch": spec_def['branch'],
                    "fullname": spec_def['name'],
                    "name": spec_def['repo']}),
        spec=deployment_spec)
def from_runs(cls, id: str, runs: List[Run]):
    """Build the Kubernetes objects serving one Tensorboard instance.

    Creates a Deployment (tensorboard + activity-proxy containers), a
    ClusterIP Service on port 80, and an Ingress routing /tb/<id>/ to
    it, then wraps them in a cls(...) instance.

    :param id: unique instance id; used in object names and labels.
    :param runs: experiment runs whose output directories are mounted
        (read-only) into the tensorboard container.
    :returns: cls(deployment=..., service=..., ingress=...)
    """
    k8s_name = 'tensorboard-' + id
    # Hash of run names is stored as a label so an existing instance
    # can be matched against a requested set of runs.
    run_names_hash = K8STensorboardInstance.generate_run_names_hash(runs)

    # One sub-path mount per run: <prefix>/<owner>/<name>.
    volume_mounts = []
    for run in runs:
        mount = k8s.V1VolumeMount(
            name=cls.EXPERIMENTS_OUTPUT_VOLUME_NAME,
            mount_path=os.path.join(
                cls.TENSORBOARD_CONTAINER_MOUNT_PATH_PREFIX,
                run.owner, run.name),
            sub_path=os.path.join(run.owner, run.name))
        volume_mounts.append(mount)

    deployment_labels = {
        'name': k8s_name,
        'type': 'nauta-tensorboard',
        'nauta_app_name': 'tensorboard',
        'id': id,
        'runs-hash': run_names_hash
    }

    # Tensorboard binds to 127.0.0.1:6006 only; external traffic goes
    # through the proxy container listening on port 80.
    tensorboard_command = [
        "tensorboard",
        "--logdir", cls.TENSORBOARD_CONTAINER_MOUNT_PATH_PREFIX,
        "--port", "6006",
        "--host", "127.0.0.1"
    ]

    nauta_config = NautaPlatformConfig.incluster_init()
    tensorboard_image = nauta_config.get_tensorboard_image()
    tensorboard_proxy_image = nauta_config.get_activity_proxy_image()

    deployment = k8s.V1Deployment(
        api_version='apps/v1',
        kind='Deployment',
        metadata=k8s.V1ObjectMeta(name=k8s_name,
                                  labels=deployment_labels),
        spec=k8s.V1DeploymentSpec(
            replicas=1,
            selector=k8s.V1LabelSelector(match_labels=deployment_labels),
            template=k8s.V1PodTemplateSpec(
                metadata=k8s.V1ObjectMeta(labels=deployment_labels),
                spec=k8s.V1PodSpec(
                    # Pinned to master nodes (toleration + required
                    # node affinity on the "master" label).
                    tolerations=[
                        k8s.V1Toleration(key='master',
                                         operator='Exists',
                                         effect='NoSchedule')
                    ],
                    affinity=k8s.V1Affinity(node_affinity=k8s.V1NodeAffinity(
                        required_during_scheduling_ignored_during_execution
                        =k8s.V1NodeSelector(node_selector_terms=[
                            k8s.V1NodeSelectorTerm(match_expressions=[
                                k8s.V1NodeSelectorRequirement(
                                    key="master",
                                    operator="In",
                                    values=["True"])
                            ])
                        ]))),
                    containers=[
                        k8s.V1Container(name='app',
                                        image=tensorboard_image,
                                        command=tensorboard_command,
                                        volume_mounts=volume_mounts),
                        # Readiness is probed on the proxy only; the
                        # tensorboard container has no probe.
                        k8s.V1Container(
                            name='proxy',
                            image=tensorboard_proxy_image,
                            ports=[k8s.V1ContainerPort(container_port=80)],
                            readiness_probe=k8s.V1Probe(
                                period_seconds=5,
                                http_get=k8s.V1HTTPGetAction(
                                    path='/healthz',
                                    port=80)))
                    ],
                    volumes=[
                        k8s.V1Volume(
                            name=cls.EXPERIMENTS_OUTPUT_VOLUME_NAME,
                            persistent_volume_claim=
                            k8s.V1PersistentVolumeClaimVolumeSource(
                                claim_name=cls.
                                EXPERIMENTS_OUTPUT_VOLUME_NAME,
                                read_only=True))
                    ]))))

    service = k8s.V1Service(
        api_version='v1',
        kind='Service',
        metadata=k8s.V1ObjectMeta(name=k8s_name,
                                  labels={
                                      'name': k8s_name,
                                      'type': 'nauta-tensorboard',
                                      'nauta_app_name': 'tensorboard',
                                      'id': id
                                  }),
        spec=k8s.V1ServiceSpec(
            type='ClusterIP',
            ports=[k8s.V1ServicePort(name='web', port=80, target_port=80)],
            selector={
                'name': k8s_name,
                'type': 'nauta-tensorboard',
                'nauta_app_name': 'tensorboard',
                'id': id
            }))

    # Routes /tb/<id>/ to the service; rewrite-target strips the prefix
    # before it reaches the proxy.
    ingress = k8s.V1beta1Ingress(
        api_version='extensions/v1beta1',
        kind='Ingress',
        metadata=k8s.V1ObjectMeta(
            name=k8s_name,
            labels={
                'name': k8s_name,
                'type': 'nauta-tensorboard',
                'nauta_app_name': 'tensorboard',
                'id': id
            },
            annotations={
                'nauta.ingress.kubernetes.io/rewrite-target': '/',
                'kubernetes.io/ingress.class': 'nauta-ingress'
            }),
        spec=k8s.V1beta1IngressSpec(rules=[
            k8s.V1beta1IngressRule(
                host='localhost',
                http=k8s.V1beta1HTTPIngressRuleValue(paths=[
                    k8s.V1beta1HTTPIngressPath(
                        path='/tb/' + id + "/",
                        backend=k8s.V1beta1IngressBackend(
                            service_name=k8s_name,
                            service_port=80))
                ]))
        ]))

    return cls(deployment=deployment, service=service, ingress=ingress)
def get_container(self, volume_mounts: List[client.V1VolumeMount]):
    """Assemble the django-app container spec.

    In minikube (development) the Django dev server is run so static
    files such as the admin page are served; otherwise the image's
    default entrypoint is used. Both probes hit /ping/ on port 8080
    with a fixed Host header.
    """
    # We want to serve static files in development, i.e admin page
    run_command = None
    if self.context.minikube:
        run_command = ["python", "manage.py", "runserver", "0.0.0.0:8080"]

    env_vars = [
        client.V1EnvVar(
            name='DATABASE_URL',
            value='postgres://*****:*****@postgres-service:5432/dev_db'
        ),
        client.V1EnvVar(
            name='ALLOWED_HOSTS',
            value='.kangox.com,127.0.0.1,[::1],localhost'
        ),
        client.V1EnvVar(
            name='RABBITMQ_PASSWORD',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name='rabbitmq',
                    key='rabbitmq-password'
                )
            )
        ),
    ]

    def ping_probe():
        # Liveness and readiness share the same /ping/ configuration;
        # a distinct V1Probe is built for each to avoid aliasing.
        return client.V1Probe(
            http_get=client.V1HTTPGetAction(
                path='/ping/',
                port=8080,
                http_headers=[
                    client.V1HTTPHeader(name='Host', value='127.0.0.1')
                ]
            ),
            period_seconds=5,
            initial_delay_seconds=5,
            failure_threshold=1
        )

    return client.V1Container(
        name='django-app',
        image=self.context.image,
        image_pull_policy='IfNotPresent',
        ports=[client.V1ContainerPort(container_port=8080)],
        command=run_command,
        volume_mounts=volume_mounts,
        env=env_vars,
        readiness_probe=ping_probe(),
        liveness_probe=ping_probe()
    )