def get_affinity(self):
    """Determine the affinity term for the build pod.

    There are a two affinity strategies, which one is used depends on how
    the BinderHub is configured.

    In the default setup the affinity of each build pod is an
    "anti-affinity" which causes the pods to prefer to schedule on
    separate nodes.

    In a setup with docker-in-docker enabled pods for a particular
    repository prefer to schedule on the same node in order to reuse the
    docker layer cache of previous builds.
    """
    # The nodes currently running a dind pod are the only nodes with a
    # docker layer cache worth targeting for sticky builds.
    resp = self.api.list_namespaced_pod(
        self.namespace,
        label_selector="component=dind,app=binder",
        _request_timeout=KUBE_REQUEST_TIMEOUT,
        _preload_content=False,
    )
    dind_pods = json.loads(resp.read())
    node_names = [
        pod["spec"]["nodeName"] for pod in dind_pods.get("items", [])
    ]

    # BUGFIX: guard on node_names rather than on the response dict, which
    # is always truthy even when "items" is empty -- previously an empty
    # item list raised IndexError on ranked_nodes[0].
    if self.sticky_builds and node_names:
        # Rank the candidate nodes for this repo so repeated builds of the
        # same repository land on the same node (and its docker cache).
        ranked_nodes = rendezvous_rank(node_names, self.repo_url)
        best_node_name = ranked_nodes[0]

        affinity = client.V1Affinity(
            node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        weight=100,
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key="kubernetes.io/hostname",
                                    operator="In",
                                    values=[best_node_name],
                                )
                            ]),
                    )
                ]))
    else:
        # Default strategy: soft anti-affinity against pods with the same
        # component label, spreading build pods across nodes.
        affinity = client.V1Affinity(
            pod_anti_affinity=client.V1PodAntiAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1WeightedPodAffinityTerm(
                        weight=100,
                        pod_affinity_term=client.V1PodAffinityTerm(
                            topology_key="kubernetes.io/hostname",
                            label_selector=client.V1LabelSelector(
                                match_labels=dict(
                                    component=self._component_label)),
                        ),
                    )
                ]))

    return affinity
def _generate_affinity(self):
    """Build a sample ``V1Affinity`` exercising all three affinity kinds.

    Combines a node affinity (one preferred and one required selector on
    ``some_node_label``), a required pod affinity, and a preferred pod
    anti-affinity. Assembled from named intermediates for readability.
    """
    # Soft node preference: some_node_label among the "possible" values.
    preferred_node_term = k8s_client.V1PreferredSchedulingTerm(
        weight=1,
        preference=k8s_client.V1NodeSelectorTerm(
            match_expressions=[
                k8s_client.V1NodeSelectorRequirement(
                    key="some_node_label",
                    operator="In",
                    values=[
                        "possible-label-value-1",
                        "possible-label-value-2",
                    ],
                )
            ]),
    )

    # Hard node requirement: some_node_label among the "required" values.
    required_node_selector = k8s_client.V1NodeSelector(node_selector_terms=[
        k8s_client.V1NodeSelectorTerm(match_expressions=[
            k8s_client.V1NodeSelectorRequirement(
                key="some_node_label",
                operator="In",
                values=[
                    "required-label-value-1",
                    "required-label-value-2",
                ],
            )
        ]),
    ])

    node_affinity = k8s_client.V1NodeAffinity(
        preferred_during_scheduling_ignored_during_execution=[
            preferred_node_term
        ],
        required_during_scheduling_ignored_during_execution=(
            required_node_selector),
    )

    # Hard co-location with pods carrying the given label, limited to the
    # listed namespaces.
    pod_affinity = k8s_client.V1PodAffinity(
        required_during_scheduling_ignored_during_execution=[
            k8s_client.V1PodAffinityTerm(
                label_selector=k8s_client.V1LabelSelector(
                    match_labels={
                        "some-pod-label-key": "some-pod-label-value"
                    }),
                namespaces=["namespace-a", "namespace-b"],
                topology_key="key-1",
            )
        ])

    # Soft repulsion from pods whose some_pod_label has a forbidden value.
    anti_affinity_term = k8s_client.V1WeightedPodAffinityTerm(
        weight=1,
        pod_affinity_term=k8s_client.V1PodAffinityTerm(
            label_selector=k8s_client.V1LabelSelector(match_expressions=[
                k8s_client.V1LabelSelectorRequirement(
                    key="some_pod_label",
                    operator="NotIn",
                    values=[
                        "forbidden-label-value-1",
                        "forbidden-label-value-2",
                    ],
                )
            ]),
            namespaces=["namespace-c"],
            topology_key="key-2",
        ),
    )
    pod_anti_affinity = k8s_client.V1PodAntiAffinity(
        preferred_during_scheduling_ignored_during_execution=[
            anti_affinity_term
        ])

    return k8s_client.V1Affinity(
        node_affinity=node_affinity,
        pod_affinity=pod_affinity,
        pod_anti_affinity=pod_anti_affinity,
    )
def export_deployment(self):
    """Build the ExtensionsV1beta1Deployment object for this service.

    The pod template mounts /opt/logs from the host (plus any extra
    host-path mounts in ``self.mounts``), wires liveness/readiness probes
    on the first container port (HTTP when ``self.healthcheck`` is set,
    TCP otherwise), optionally adds a sidecar container, and prefers
    nodes labelled project=moji (weight 30) and deploy=<dm_name>
    (weight 70).

    Returns:
        kubernetes.client.ExtensionsV1beta1Deployment: ready to submit
        to the API server.
    """
    # --- volumes and mounts ---------------------------------------------
    volume_mounts = []
    containers = []
    volumes = []
    volume_mounts.append(
        client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
    volumes.append(
        client.V1Volume(name='logs',
                        host_path=client.V1HostPathVolumeSource(
                            path='/opt/logs', type='DirectoryOrCreate')))
    if self.mounts:
        # self.mounts maps a path to a volume name; the same path is used
        # on the host and inside the container.
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(name=self.mounts[path],
                                host_path=client.V1HostPathVolumeSource(
                                    path=path, type='DirectoryOrCreate')))

    # --- probes ----------------------------------------------------------
    # BUGFIX: the probes used to index self.container_port[0]
    # unconditionally, raising IndexError when no port is configured.
    # Guard like the sibling export_deployment variant does.
    ports = []
    liveness_probe = None
    readiness_probe = None
    if self.container_port:
        ports = [
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ]
        probe_port = int(self.container_port[0])
        if self.healthcheck:
            # HTTP probes against the configured healthcheck path.
            liveness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                port=probe_port))
            readiness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(path=self.healthcheck,
                                                port=probe_port))
        else:
            # Plain TCP connect probes on the first port.
            liveness_probe = client.V1Probe(
                initial_delay_seconds=15,
                tcp_socket=client.V1TCPSocketAction(port=probe_port))
            readiness_probe = client.V1Probe(
                initial_delay_seconds=15,
                tcp_socket=client.V1TCPSocketAction(port=probe_port))

    # --- containers ------------------------------------------------------
    env_vars = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        # Expose pod name and IP to the application via the downward API.
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]
    container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=ports,
        image_pull_policy='Always',
        env=env_vars,
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=volume_mounts,
        liveness_probe=liveness_probe,
        readiness_probe=readiness_probe)
    containers.append(container)
    if self.sidecar:
        # Optional sidecar shares env, resources and mounts, but exposes
        # no ports and carries no probes.
        sidecar_container = client.V1Container(
            name='sidecar-%s' % self.dm_name,
            image=self.sidecar,
            image_pull_policy='Always',
            env=env_vars,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts)
        containers.append(sidecar_container)

    # --- pod template and deployment -------------------------------------
    secrets = client.V1LocalObjectReference('registrysecret')
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[secrets],
            volumes=volumes,
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='project',
                                    operator='In',
                                    values=['moji'])
                            ]),
                        weight=30),
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key='deploy',
                                    operator='In',
                                    values=[self.dm_name])
                            ]),
                        weight=70)
                ]))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=template,
        selector=selector,
        min_ready_seconds=3)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
    return deployment
def clean_pod_template(pod_template, match_node_purpose="prefer", pod_type="worker"):
    """Normalize a pod template and check for type errors.

    Parameters
    ----------
    pod_template : kubernetes.client.V1Pod
        Template to normalize; a defensive deep copy is modified and
        returned, the argument itself is untouched.
    match_node_purpose : {"prefer", "require", "ignore"}
        How strongly to tie pods to nodes labelled
        ``k8s.dask.org/node-purpose=<pod_type>``.
    pod_type : str
        Value used for the node-purpose affinity and the dedicated-node
        tolerations.

    Returns
    -------
    kubernetes.client.V1Pod
        A copy with metadata/labels/env guaranteed to exist, default
        tolerations appended, and node affinity applied.

    Raises
    ------
    TypeError
        If ``pod_template`` is a str or dict instead of a V1Pod.
    ValueError
        If ``match_node_purpose`` is not one of the allowed values.
    """
    if isinstance(pod_template, str):
        # BUGFIX: the two sentences previously ran together
        # ("got %sIf trying...") -- separate them with ". ".
        msg = (
            "Expected a kubernetes.client.V1Pod object, got %s. "
            "If trying to pass a yaml filename then use "
            "KubeCluster.from_yaml"
        )
        raise TypeError(msg % pod_template)

    if isinstance(pod_template, dict):
        msg = (
            "Expected a kubernetes.client.V1Pod object, got %s. "
            "If trying to pass a dictionary specification then use "
            "KubeCluster.from_dict"
        )
        raise TypeError(msg % str(pod_template))

    pod_template = copy.deepcopy(pod_template)

    # Make sure metadata / labels / env objects exist, so they can be
    # modified later without a lot of `is None` checks
    if pod_template.metadata is None:
        pod_template.metadata = client.V1ObjectMeta()
    if pod_template.metadata.labels is None:
        pod_template.metadata.labels = {}

    if pod_template.spec.containers[0].env is None:
        pod_template.spec.containers[0].env = []

    # add default tolerations so workers can land on dedicated node pools
    tolerations = [
        client.V1Toleration(
            key="k8s.dask.org/dedicated",
            operator="Equal",
            value=pod_type,
            effect="NoSchedule",
        ),
        # GKE currently does not permit creating taints on a node pool
        # with a `/` in the key field
        client.V1Toleration(
            key="k8s.dask.org_dedicated",
            operator="Equal",
            value=pod_type,
            effect="NoSchedule",
        ),
    ]

    if pod_template.spec.tolerations is None:
        pod_template.spec.tolerations = tolerations
    else:
        pod_template.spec.tolerations.extend(tolerations)

    # add default node affinity to k8s.dask.org/node-purpose=<pod_type>
    if match_node_purpose != "ignore":
        # for readability
        affinity = pod_template.spec.affinity

        if affinity is None:
            affinity = client.V1Affinity()
        if affinity.node_affinity is None:
            affinity.node_affinity = client.V1NodeAffinity()

        # a common object for both a preferred and a required node affinity
        node_selector_term = client.V1NodeSelectorTerm(
            match_expressions=[
                client.V1NodeSelectorRequirement(
                    key="k8s.dask.org/node-purpose", operator="In", values=[pod_type]
                )
            ]
        )

        if match_node_purpose == "require":
            if (
                affinity.node_affinity.required_during_scheduling_ignored_during_execution
                is None
            ):
                affinity.node_affinity.required_during_scheduling_ignored_during_execution = client.V1NodeSelector(
                    node_selector_terms=[]
                )
            affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms.append(
                node_selector_term
            )
        elif match_node_purpose == "prefer":
            if (
                affinity.node_affinity.preferred_during_scheduling_ignored_during_execution
                is None
            ):
                affinity.node_affinity.preferred_during_scheduling_ignored_during_execution = (
                    []
                )
            preferred_scheduling_terms = [
                client.V1PreferredSchedulingTerm(
                    preference=node_selector_term, weight=100
                )
            ]
            affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.extend(
                preferred_scheduling_terms
            )
        else:
            # BUGFIX: name the offending parameter instead of "Attribute".
            raise ValueError(
                'match_node_purpose must be one of "ignore", "prefer", or "require".'
            )

        pod_template.spec.affinity = affinity

    return pod_template
def update_deploy_v2():
    """Flask view: parse an update request and dispatch to update_deployment_v2.

    Reads a JSON body with at least ``namespace``, ``deploy_name`` and
    ``action``, builds the corresponding kubernetes objects (toleration,
    node affinity, pod anti-affinity, image, replicas) for the requested
    action, and forwards them to ``update_deployment_v2``. Unsupported
    or malformed requests return a JSON error payload.
    """
    data = json.loads(request.get_data().decode('UTF-8'))
    current_app.logger.debug("接受到的数据:{}".format(data))
    namespace = handle_input(data.get('namespace'))
    deploy_name = handle_input(data.get('deploy_name'))
    action = handle_input(data.get('action'))
    # Only the field relevant to `action` is filled in; the rest stay None.
    image = None
    replicas = None
    toleration = None
    pod_anti_affinity = None
    pod_affinity = None
    node_affinity = None
    labels = None
    if action == "add_pod_anti_affinity":
        print("正在运行{}操作".format(action))
        affinity = handle_input(data.get('pod_anti_affinity'))
        affinity_type = handle_input(affinity.get('type'))
        labelSelector = handle_input(affinity.get('labelSelector'))
        key = handle_input(affinity.get('key'))
        value = handle_input(affinity.get('value'))
        topologyKey = handle_input(affinity.get('topologyKey'))
        if affinity_type == "required":
            if labelSelector == "matchExpressions":
                if not isinstance(value, list):
                    value = [value]
                operator = handle_input(affinity.get('operator'))
                # Only In/NotIn take a values list; other operators
                # (Exists/DoesNotExist) must pass values=None.
                if operator not in ('In', 'NotIn'):
                    value = None
                print(value)
                label_selector = client.V1LabelSelector(match_expressions=[
                    client.V1LabelSelectorRequirement(
                        key=key, operator=operator, values=value)
                ])
            elif labelSelector == "matchLabels":
                if isinstance(value, list):
                    return jsonify(
                        {"error": "{}模式下不支持values设置为数组".format(labelSelector)})
                label_selector = client.V1LabelSelector(
                    match_labels={key: value})
            else:
                return jsonify(
                    {"error": "不支持{} labelSelector".format(labelSelector)})
            # BUGFIX: removed a stray no-op expression statement
            # (`client.V1Affinity`) that previously sat here.
            pod_anti_affinity = client.V1PodAntiAffinity(
                required_during_scheduling_ignored_during_execution=[
                    client.V1PodAffinityTerm(label_selector=label_selector,
                                             topology_key=topologyKey)
                ])
            print("添加的互斥调度为:{}".format(pod_anti_affinity))
        elif affinity_type == "preferred":
            weight = string_to_int(handle_input(affinity.get('weight')))
            if weight is None:
                return jsonify(
                    {"error": "{}类型必须设置weight".format(affinity_type)})
            if labelSelector == "matchExpressions":
                if not isinstance(value, list):
                    value = [value]
                operator = handle_input(affinity.get('operator'))
                if operator not in ('In', 'NotIn'):
                    value = None
                label_selector = client.V1LabelSelector(match_expressions=[
                    client.V1LabelSelectorRequirement(
                        key=key, operator=operator, values=value)
                ])
            elif labelSelector == "matchLabels":
                if isinstance(value, list):
                    return jsonify(
                        {"error": "{}模式下不支持values设置为数组".format(labelSelector)})
                label_selector = client.V1LabelSelector(
                    match_labels={key: value})
            else:
                return jsonify(
                    {"error": "不支持{} labelSelector".format(labelSelector)})
            pod_anti_affinity = client.V1PodAntiAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1WeightedPodAffinityTerm(
                        pod_affinity_term=client.V1PodAffinityTerm(
                            label_selector=label_selector,
                            topology_key=topologyKey),
                        weight=weight)
                ])
            print("添加的互斥调度为:{}".format(pod_anti_affinity))
        else:
            return jsonify({"error": "不支持{}这种调度".format(affinity_type)})
    elif action == "delete_pod_anti_affinity":
        # Deletion is handled downstream; nothing to build here.
        print("正在运行{}操作".format(action))
    elif action == "add_node_affinity":
        current_app.logger.debug("正在运行{}操作".format(action))
        affinity = handle_input(data.get('node_affinity'))
        node_affinity_type = handle_input(affinity.get('type'))
        nodeSelector = handle_input(affinity.get('nodeSelector'))
        key = handle_input(affinity.get('key'))
        value = handle_input(affinity.get('value'))
        operator = handle_input(affinity.get('operator'))
        values = []
        if operator in ('Exists', 'DoesNotExist'):
            # BUGFIX: this was `values == None`, a comparison with no
            # effect, so Exists/DoesNotExist sent values=[] instead of
            # omitting the field.
            values = None
        else:
            if not isinstance(value, list):
                values.append(value)
            else:
                values = value
        if node_affinity_type == "preferred":
            weight = string_to_int(handle_input(affinity.get('weight')))
            if weight is None:
                return simple_error_handle(
                    "{}类型必须设置weight".format(node_affinity_type))
            preferred_term = []
            if nodeSelector == "matchExpressions":
                match_expressions = []
                expression = client.V1NodeSelectorRequirement(
                    key=key,
                    operator=operator,
                    values=values,
                )
                match_expressions.append(expression)
                preference = client.V1NodeSelectorTerm(
                    match_expressions=match_expressions)
            # treat anything else as nodeSelector == "matchFields"
            else:
                match_fields = []
                field = client.V1NodeSelectorRequirement(
                    key=key,
                    operator=operator,
                    values=values,
                )
                match_fields.append(field)
                preference = client.V1NodeSelectorTerm(
                    match_fields=match_fields)
            term = client.V1PreferredSchedulingTerm(
                weight=weight,
                preference=preference,
            )
            preferred_term.append(term)
            node_affinity = client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=preferred_term)
        elif node_affinity_type == "required":
            current_app.logger.debug(
                "node_affinity_type:{}".format(node_affinity_type))
            node_selector_terms = []
            if nodeSelector == "matchExpressions":
                match_expressions = []
                expression = client.V1NodeSelectorRequirement(
                    key=key,
                    operator=operator,
                    values=values,
                )
                match_expressions.append(expression)
                term = client.V1NodeSelectorTerm(
                    match_expressions=match_expressions)
            else:
                match_fields = []
                field = client.V1NodeSelectorRequirement(
                    key=key,
                    operator=operator,
                    values=values,
                )
                match_fields.append(field)
                term = client.V1NodeSelectorTerm(match_fields=match_fields)
            node_selector_terms.append(term)
            node_affinity = client.V1NodeAffinity(
                required_during_scheduling_ignored_during_execution=client.
                V1NodeSelector(node_selector_terms=node_selector_terms))
        else:
            return simple_error_handle("不支持{}这种调度".format(node_affinity_type))
    elif action == "delete_node_affinity":
        print("正在运行{}操作".format(action))
    elif action == "add_toleration":
        print("正在运行{}操作".format(action))
        t = handle_input(data.get("toleration"))
        # BUGFIX: previously printed the outer `toleration` variable,
        # which is still None at this point, instead of the payload `t`.
        print(type(t), t)
        effect = t.get('effect')
        key = t.get('key')
        operator = t.get('operator')
        value = t.get('value')
        toleration_seconds = handle_toleraion_seconds(
            t.get('toleration_seconds'))
        print("toleration_seconds:{}".format(toleration_seconds))
        toleration = client.V1Toleration(effect=effect,
                                         key=key,
                                         operator=operator,
                                         toleration_seconds=toleration_seconds,
                                         value=value)
        print(toleration)
        # NOTE: the old `if not toleration:` error branch was unreachable
        # (a V1Toleration instance is always truthy) and has been removed.
    elif action == "delete_toleration":
        print("正在运行{}操作".format(action))
        t = handle_input(data.get("toleration"))
        effect = handle_toleration_item(t.get('effect'))
        key = handle_toleration_item(t.get('key'))
        operator = handle_toleration_item(t.get('operator'))
        value = handle_toleration_item(t.get('value'))
        toleration_seconds = handle_toleraion_seconds(
            t.get('toleration_seconds'))
        print("toleration_seconds:{}".format(toleration_seconds))
        toleration = client.V1Toleration(effect=effect,
                                         key=key,
                                         operator=operator,
                                         toleration_seconds=toleration_seconds,
                                         value=value)
        # NOTE: same unreachable `if not toleration:` branch removed here.
    elif action == "add_pod_affinity":
        pass
    elif action == "delete_pod_affinity":
        pass
    elif action == "update_replicas":
        replicas = handle_input(data.get('replicas'))
        if not replicas:
            msg = "{}需要提供replicas".format(action)
            return jsonify({"error": msg})
    elif action == "update_image":
        project = handle_input(data.get('project'))
        env = handle_input(data.get('env'))
        imageRepo = handle_input(data.get('imageRepo'))
        imageName = handle_input(data.get('imageName'))
        imageTag = handle_input(data.get('imageTag'))
        # All five parts are needed to assemble repo/project-env/name:tag.
        if None not in (imageRepo, project, env, imageName, imageTag):
            image = "{}/{}-{}/{}:{}".format(imageRepo, project, env,
                                            imageName, imageTag)
            print("image值{}".format(image))
        if not image:
            msg = "{}需要提供image".format(action)
            return jsonify({"error": msg})
    elif action == "add_labels":
        pass
    elif action == "delete_labels":
        pass
    else:
        msg = "暂时不支持{}操作".format(action)
        print(msg)
        return jsonify({"error": msg})
    return update_deployment_v2(deploy_name=deploy_name,
                                namespace=namespace,
                                action=action,
                                image=image,
                                replicas=replicas,
                                toleration=toleration,
                                node_affinity=node_affinity,
                                pod_anti_affinity=pod_anti_affinity,
                                pod_affinity=pod_affinity,
                                labels=labels)
def export_deployment(self):
    """Build an ExtensionsV1beta1Deployment for this service.

    Like the other export_deployment variants, but additionally resolves
    per-deployment /etc/hosts entries (host aliases) from the database via
    a Redis staging area, and derives the node-affinity preference key and
    required project values from ``self.labels`` when present.
    """
    # Configure the Pod template container.
    volume_mounts = []
    containers = []
    volumes = []
    ports = []
    liveness_probe = None
    readiness_probe = None
    # Host log directory is always mounted into the container.
    volume_mounts.append(
        client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
    volumes.append(
        client.V1Volume(name='logs',
                        host_path=client.V1HostPathVolumeSource(
                            path='/opt/logs', type='DirectoryOrCreate')))
    if self.mounts:
        # self.mounts maps a path to a volume name; the same path is used
        # on the host and in the container.
        for path in self.mounts:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=path,
                                     name=self.mounts[path]))
            volumes.append(
                client.V1Volume(name=self.mounts[path],
                                host_path=client.V1HostPathVolumeSource(
                                    path=path, type='DirectoryOrCreate')))
    if self.container_port:
        ports = [
            client.V1ContainerPort(container_port=int(port))
            for port in self.container_port
        ]
        # Default to TCP connect probes on the first configured port...
        liveness_probe = client.V1Probe(
            initial_delay_seconds=15,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0])))
        readiness_probe = client.V1Probe(
            initial_delay_seconds=15,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0])))
        if self.healthcheck:
            # ...but prefer HTTP probes when a healthcheck path is set.
            liveness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(
                    path=self.healthcheck,
                    port=int(self.container_port[0])))
            readiness_probe = client.V1Probe(
                initial_delay_seconds=15,
                http_get=client.V1HTTPGetAction(
                    path=self.healthcheck,
                    port=int(self.container_port[0])))
    # Locale plus pod name/IP exposed via the downward API.
    Env = [
        client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
        client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
        client.V1EnvVar(name='POD_NAME',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='metadata.name'))),
        client.V1EnvVar(name='POD_IP',
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path='status.podIP'))),
    ]
    # Build the container without probes first; rebuild with probes only
    # when both were configured above.
    container = client.V1Container(name=self.dm_name,
                                   image=self.image,
                                   ports=ports,
                                   image_pull_policy='Always',
                                   env=Env,
                                   resources=client.V1ResourceRequirements(
                                       limits=self.re_limits,
                                       requests=self.re_requests),
                                   volume_mounts=volume_mounts)
    if liveness_probe and readiness_probe:
        container = client.V1Container(
            name=self.dm_name,
            image=self.image,
            ports=ports,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts,
            liveness_probe=liveness_probe,
            readiness_probe=readiness_probe)
    containers.append(container)
    if self.sidecar:
        # Optional sidecar shares env, resources and mounts.
        sidecar_container = client.V1Container(
            name='sidecar-%s' % self.dm_name,
            image=self.sidecar,
            image_pull_policy='Always',
            env=Env,
            resources=client.V1ResourceRequirements(
                limits=self.re_limits, requests=self.re_requests),
            volume_mounts=volume_mounts)
        containers.append(sidecar_container)
    # Create and configure the spec section.
    secrets = client.V1LocalObjectReference('registrysecret')
    preference_key = self.dm_name
    project_values = ['xxxx']
    host_aliases = []
    # Fetch (ip, hostname) pairs recorded for this deployment/context.
    # NOTE(review): presumably db_op wraps a SQLAlchemy session and Redis
    # is used only as a scratch area to group hostnames per IP -- confirm.
    db_docker_hosts = db_op.docker_hosts
    values = db_docker_hosts.query.with_entities(
        db_docker_hosts.ip, db_docker_hosts.hostname).filter(
            and_(db_docker_hosts.deployment == self.dm_name,
                 db_docker_hosts.context == self.context)).all()
    db_op.DB.session.remove()
    if values:
        ips = []
        # Stage each hostname under a per-IP Redis list key.
        for value in values:
            try:
                ip, hostname = value
                key = "op_docker_hosts_%s" % ip
                Redis.lpush(key, hostname)
                ips.append(ip)
            except Exception as e:
                logging.error(e)
        # Drain each per-IP list into a V1HostAlias, then clean up the key.
        for ip in set(ips):
            try:
                key = "op_docker_hosts_%s" % ip
                if Redis.exists(key):
                    hostnames = Redis.lrange(key, 0, -1)
                    if hostnames:
                        host_aliases.append(
                            client.V1HostAlias(hostnames=hostnames, ip=ip))
                    Redis.delete(key)
            except Exception as e:
                logging.error(e)
    # Labels may override the affinity preference key and project values.
    if self.labels:
        if 'deploy' in self.labels:
            preference_key = self.labels['deploy']
        if 'project' in self.labels:
            project_values = [self.labels['project']]
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=containers,
            image_pull_secrets=[secrets],
            volumes=volumes,
            host_aliases=host_aliases,
            # Prefer nodes labelled <preference_key>=mark; require nodes
            # labelled project in project_values.
            affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                preferred_during_scheduling_ignored_during_execution=[
                    client.V1PreferredSchedulingTerm(
                        preference=client.V1NodeSelectorTerm(
                            match_expressions=[
                                client.V1NodeSelectorRequirement(
                                    key=preference_key,
                                    operator='In',
                                    values=['mark'])
                            ]),
                        weight=100)
                ],
                required_during_scheduling_ignored_during_execution=client.
                V1NodeSelector(node_selector_terms=[
                    client.V1NodeSelectorTerm(match_expressions=[
                        client.V1NodeSelectorRequirement(
                            key='project',
                            operator='In',
                            values=project_values)
                    ])
                ])))))
    selector = client.V1LabelSelector(
        match_labels={"project": self.dm_name})
    # Create the specification of deployment.
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
        self.replicas),
                                                  template=template,
                                                  selector=selector,
                                                  min_ready_seconds=3)
    # Instantiate the deployment object.
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=spec)
    return deployment
def export_deployment(self):
    """Assemble the ExtensionsV1beta1Deployment object for this service.

    The pod runs a single container with TCP liveness/readiness probes on
    the first configured port, mounts the host's /opt/logs, and prefers
    nodes labelled project=moji (weight 30) and deploy=<dm_name>
    (weight 70).
    """
    # The host log directory is always mounted; the "launch" deployment
    # additionally gets its config directory.
    mounts = [client.V1VolumeMount(mount_path='/opt/logs', name='logs')]
    if self.dm_name == 'launch':
        mounts.append(
            client.V1VolumeMount(mount_path='/opt/%s/conf' % self.dm_name,
                                 name=self.dm_name))

    def _tcp_probe():
        # Liveness and readiness are identical TCP connect checks on the
        # first configured port.
        return client.V1Probe(
            initial_delay_seconds=30,
            tcp_socket=client.V1TCPSocketAction(
                port=int(self.container_port[0])))

    app_container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[
            client.V1ContainerPort(container_port=int(p))
            for p in self.container_port
        ],
        image_pull_policy='Always',
        env=[
            client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
            client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
        ],
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=mounts,
        liveness_probe=_tcp_probe(),
        readiness_probe=_tcp_probe())

    pull_secret = client.V1LocalObjectReference('registrysecret')
    log_volume = client.V1Volume(
        name='logs',
        host_path=client.V1HostPathVolumeSource(path='/opt/logs'))

    # Soft scheduling: lightly prefer "moji" project nodes, strongly
    # prefer nodes dedicated to this deployment.
    scheduling_preferences = [
        client.V1PreferredSchedulingTerm(
            weight=30,
            preference=client.V1NodeSelectorTerm(match_expressions=[
                client.V1NodeSelectorRequirement(
                    key='project', operator='In', values=['moji'])
            ])),
        client.V1PreferredSchedulingTerm(
            weight=70,
            preference=client.V1NodeSelectorTerm(match_expressions=[
                client.V1NodeSelectorRequirement(
                    key='deploy', operator='In', values=[self.dm_name])
            ])),
    ]

    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=[app_container],
            image_pull_secrets=[pull_secret],
            volumes=[log_volume],
            affinity=client.V1Affinity(
                node_affinity=client.V1NodeAffinity(
                    preferred_during_scheduling_ignored_during_execution=(
                        scheduling_preferences)))))

    # Create the specification of deployment.
    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=pod_template,
        selector=client.V1LabelSelector(
            match_labels={"project": self.dm_name}),
        min_ready_seconds=3)

    # Instantiate the deployment object.
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=deploy_spec)