def create_pod(environment):
    """Build a V1Pod named ``test-pod`` running an nginx container.

    The container carries a single env var ``ENV`` set to *environment*.
    """
    env_var = client.V1EnvVar(name="ENV", value=environment)
    nginx = client.V1Container(name="test-container",
                               image="nginx",
                               env=[env_var])
    return client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name="test-pod", ),
        spec=client.V1PodSpec(containers=[nginx]),
    )
def get_deployment(self, name, namespace='default'):
    """Return a minimal apps/v1 Deployment for the fedlearner operator."""
    operator = client.V1Container(name='fedlearner-operator', args=['test'])
    pod_template = client.V1PodTemplateSpec(
        spec=client.V1PodSpec(containers=[operator]))
    deploy_spec = client.V1DeploymentSpec(
        selector={'matchLabels': {'app': 'fedlearner-operator'}},
        template=pod_template)
    return client.V1Deployment(
        api_version='apps/v1',
        kind='Deployment',
        metadata=client.V1ObjectMeta(name=name, namespace=namespace),
        spec=deploy_spec)
def get_project_pod_spec(volume_mounts, volumes, image, command, args, ports,
                         env_vars=None, env_from=None, container_name=None,
                         resources=None, node_selector=None, affinity=None,
                         tolerations=None, restart_policy=None,
                         use_service_account=False):
    """Pod spec to be used to create pods for project: tensorboard, notebooks."""
    env_vars = get_list(env_vars)
    volume_mounts = get_list(volume_mounts)
    volumes = get_list(volumes)

    # Augment with GPU-specific mounts/volumes derived from the resources.
    gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources)
    volume_mounts += gpu_volume_mounts
    volumes += gpu_volumes

    container_ports = [client.V1ContainerPort(container_port=p) for p in ports]
    env_vars += get_resources_env_vars(resources=resources)

    main_container = client.V1Container(name=container_name,
                                        image=image,
                                        command=command,
                                        args=args,
                                        ports=container_ports,
                                        env=env_vars,
                                        env_from=env_from,
                                        resources=get_resources(resources),
                                        volume_mounts=volume_mounts)

    # Only attach a service account when requested AND RBAC is enabled.
    service_account_name = None
    if use_service_account and settings.K8S_RBAC_ENABLED:
        service_account_name = settings.K8S_SERVICE_ACCOUNT_NAME

    return client.V1PodSpec(restart_policy=restart_policy,
                            service_account_name=service_account_name,
                            containers=[main_container],
                            volumes=volumes,
                            node_selector=node_selector,
                            affinity=affinity,
                            tolerations=tolerations)
def _get_pod_spec(self, name=None, spec=None):
    """Return a client.V1PodSpec built from the *spec* dict.

    Each entry of ``spec['containers']`` is converted to a container object
    via ``self._get_container``; when *name* is given, containers are named
    ``'<name>-<index>'``.

    Bug fix: the original constructed the V1PodSpec but returned the raw
    ``spec`` dict instead of the constructed object. Also replaced the
    O(n) ``containers.index(item)`` (wrong for duplicate entries) with
    ``enumerate``.
    """
    container_objs = []
    for index, item in enumerate(spec.get('containers', [])):
        # Derive a per-container name only when a base name was supplied.
        container_name = '%s-%s' % (name, index) if name else None
        container_objs.append(self._get_container(container_name, item))
    spec['containers'] = container_objs
    return client.V1PodSpec(**spec)
def make_pod(self, player_id):
    """Build a worker V1Pod for *player_id* in this game."""
    labels = {
        'app': 'aimmo-game-worker',
        'game': self.game_id,
        'player': str(player_id),
    }
    meta = client.V1ObjectMeta(
        labels=labels,
        generate_name='aimmo-%s-worker-%s-' % (self.game_id, player_id),
        owner_references=self._make_owner_references())
    worker_spec = client.V1PodSpec(
        containers=[self._make_container(player_id)],
        service_account_name='worker')
    return client.V1Pod(metadata=meta, spec=worker_spec)
def create_pod(value_pod, pvc_name, pod_name):
    """
    creates pod

    Args:
       param1: value_pod - values required for creation of pod
       param2: pvc_name - name of pvc , pod associated with
       param3: pod_name - name of pod to be created

    Returns:
       None

    Raises:
       Raises an exception on kubernetes client api failure and asserts
    """
    # Normalize the stringified read_only flag from the test data.
    if value_pod["read_only"] == "True":
        value_pod["read_only"] = True
    elif value_pod["read_only"] == "False":
        value_pod["read_only"] = False

    core_api = client.CoreV1Api()

    pvc_mount = client.V1VolumeMount(name="mypvc",
                                     mount_path=value_pod["mount_path"])
    web_container = client.V1Container(
        name="web-server",
        image="nginx:1.19.0",
        volume_mounts=[pvc_mount],
        ports=[client.V1ContainerPort(container_port=80)])
    claim_source = client.V1PersistentVolumeClaimVolumeSource(
        claim_name=pvc_name, read_only=value_pod["read_only"])
    pvc_volume = client.V1Volume(name="mypvc",
                                 persistent_volume_claim=claim_source)
    pod_body = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name=pod_name, labels={"app": "nginx"}),
        spec=client.V1PodSpec(containers=[web_container],
                              volumes=[pvc_volume]))

    try:
        LOGGER.info(f'creating pod {pod_name} with {str(value_pod)}')
        api_response = core_api.create_namespaced_pod(
            namespace=namespace_value, body=pod_body, pretty=True)
        LOGGER.debug(str(api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_pod: {e}")
        assert False
def create_pod(self, **kargs):
    """Assemble (but do not submit) a V1Pod from the given keyword args.

    Resource limits default to the requests when not provided; when a
    volume is specified, it is parsed and attached to both the pod spec
    and the container. The pod is post-processed by ``self.cluster`` if
    one is configured.
    """
    requests = kargs["resource_requests"]
    limits = kargs["resource_limits"] or requests  # fall back to requests

    main_container = client.V1Container(
        name=kargs["pod_name"],
        image=kargs["image_name"],
        command=kargs["command"],
        resources=client.V1ResourceRequirements(
            requests=parse_resource(requests),
            limits=parse_resource(limits),
        ),
        args=kargs["container_args"],
        image_pull_policy=kargs["image_pull_policy"],
        env=kargs["env"],
    )

    pod_spec = client.V1PodSpec(
        containers=[main_container],
        restart_policy=kargs["restart_policy"],
        priority_class_name=kargs["pod_priority"],
        termination_grace_period_seconds=kargs.get("termination_period"),
    )

    # Attach data volume/mounts when one is configured.
    if kargs["volume"]:
        volumes, mounts = parse_volume_and_mount(kargs["volume"],
                                                 kargs["pod_name"])
        pod_spec.volumes = volumes
        main_container.volume_mounts = mounts

    meta = client.V1ObjectMeta(
        name=kargs["pod_name"],
        labels=self._get_common_labels(),
        owner_references=self.create_owner_reference(kargs["owner_pod"]),
        namespace=self.namespace,
    )
    pod = client.V1Pod(spec=pod_spec, metadata=meta)
    return self.cluster.with_pod(pod) if self.cluster else pod
def get_project_pod_spec(volume_mounts, volumes, image, command, args, ports,
                         env_vars=None, env_from=None, container_name=None,
                         resources=None, node_selector=None, affinity=None,
                         tolerations=None, image_pull_policy=None,
                         restart_policy=None, service_account_name=None):
    """Pod spec to be used to create pods for project: tensorboard, notebooks."""
    volume_mounts = to_list(volume_mounts, check_none=True)
    volumes = to_list(volumes, check_none=True)

    # Add GPU-specific mounts/volumes derived from the resources.
    gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources)
    volume_mounts += gpu_volume_mounts
    volumes += gpu_volumes

    container_ports = [client.V1ContainerPort(container_port=p) for p in ports]
    main_container = get_pod_container(volume_mounts=volume_mounts,
                                       image=image,
                                       command=command,
                                       args=args,
                                       ports=container_ports,
                                       env_vars=env_vars,
                                       env_from=env_from,
                                       container_name=container_name,
                                       resources=resources,
                                       image_pull_policy=image_pull_policy)

    # Drop any requested service account when RBAC is disabled.
    if service_account_name and not conf.get(K8S_RBAC_ENABLED):
        service_account_name = None

    return client.V1PodSpec(restart_policy=restart_policy,
                            security_context=get_security_context(),
                            service_account_name=service_account_name,
                            containers=[main_container],
                            volumes=volumes,
                            node_selector=node_selector,
                            affinity=affinity,
                            tolerations=tolerations)
def get_deployment_object(self):
    """Build the single-replica watcher V1Deployment for this instance."""
    env = [
        client.V1EnvVar(name='ANNOTATION_FILTER_BOOLEAN',
                        value=self.annotationFilterBoolean),
        client.V1EnvVar(name='ANNOTATION_FILTER_STRING',
                        value=self.annotationFilterString),
        client.V1EnvVar(name='WATCH_NAMESPACE', value=self.watchNamespace),
        client.V1EnvVar(name='API_VERSION', value=self.k8sApiVersion),
        client.V1EnvVar(name='API_RESOURCE_NAME',
                        value=self.k8sApiResourceName),
        # TODO: replace these hard-coded values with real configuration.
        client.V1EnvVar(name='PATH_TO_CA_PEM', value='/ca/route'),
        client.V1EnvVar(name='JWT_TOKEN', value='141819048109481094')
    ]
    watcher = client.V1Container(
        name="watcher",
        image=
        "image-registry.openshift-image-registry.svc:5000/watcher-operator/watcher-application:latest",
        ports=[client.V1ContainerPort(container_port=8080)],
        env=env)

    # Pod template runs under the watcher-application service account.
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(
            labels={"app": self.watcherApplicationName}),
        spec=client.V1PodSpec(service_account="watcher-application",
                              service_account_name="watcher-application",
                              containers=[watcher]))

    deploy_spec = client.V1DeploymentSpec(
        replicas=1,
        template=pod_template,
        selector={'matchLabels': {
            'app': self.watcherApplicationName
        }})

    return client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.watcherApplicationName),
        spec=deploy_spec)
def create_table_job(table_path, tablejobimageid, kubeconfigpath, dbid,
                     namespace, dbtype, tableid, Region, archeplaydatapath):
    """Launch a one-shot batch Job ("tablejob<tableid>") that deploys a table.

    The job pod mounts the kubeconfig and shared data directory from the
    host and runs ``app.py`` with the table parameters.

    Returns:
        ("success", message, job-status) on submission, or
        ("error", message, error-text) if anything raises.

    Fix: corrected the "Intitated"/"Intitate" typos in the returned
    status messages.
    """
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        # Host-path volumes for the kubeconfig and the shared data dir.
        volume2 = client.V1Volume(name="kubeconfig",
                                  host_path={"path": kubeconfigpath})
        volume3 = client.V1Volume(name="archeplaydata",
                                  host_path={"path": archeplaydatapath})
        mount2 = client.V1VolumeMount(name="kubeconfig",
                                      mount_path="/home/app/web/kubeconfig")
        mount3 = client.V1VolumeMount(
            name="archeplaydata",
            mount_path="/home/app/web/archeplay/data")
        container = client.V1Container(
            name="tablejob" + tableid,
            image=tablejobimageid,
            volume_mounts=[mount2, mount3],
            command=[
                "python", "-u", "app.py", table_path, dbid, tableid, Region
            ],
            env=[{
                "name": "archeplaydatapath",
                "value": "/home/app/web/archeplay/data"
            }],
            image_pull_policy="Always")
        # Never restart: a failed deploy should surface as a failed Job.
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"tablejob": "tablejob" + tableid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume2, volume3]))
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="tablejob" + tableid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(body=job,
                                                      namespace=namespace)
        success_message = tableid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = tableid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))
def create_job_object(job_arguments, size, docker_image, docker_image_tag,
                      affinity):
    """Build a V1Job running a single 'kaml-remote' container.

    Mounts the ``kaml-cfg`` ConfigMap and the ``gcp-service-account``
    secret read-only into the container.

    Fix: the original called ``uuid.uuid1()`` twice, so the Job name and
    its pod-template name never matched; the name is now generated once
    and reused.
    """
    user = os.environ['USER']
    job_name = 'kaml-remote-{}-{}'.format(user, uuid.uuid1())

    mounts = [
        client.V1VolumeMount(name='kaml-cfg-volume',
                             read_only=True,
                             mount_path='/app/kaml.cfg',
                             sub_path='kaml.cfg'),
        client.V1VolumeMount(name='gcp-service-account',
                             read_only=True,
                             mount_path='/app/service-key.json',
                             sub_path='service-key.json'),
    ]
    container = client.V1Container(
        name='kaml-remote',
        args=job_arguments,
        image='{}:{}'.format(docker_image, docker_image_tag),
        image_pull_policy='Always',
        env=[client.V1EnvVar(name='KAML_HOME', value='/app')],
        volume_mounts=mounts)
    volumes = [
        client.V1Volume(
            name='kaml-cfg-volume',
            config_map=client.V1ConfigMapVolumeSource(name='kaml-cfg')),
        client.V1Volume(
            name='gcp-service-account',
            secret=client.V1SecretVolumeSource(
                secret_name='gcp-service-account',
                items=[
                    client.V1KeyToPath(key='service-key.json',
                                       path='service-key.json')
                ])),
    ]
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=job_name, labels={'type': size}),
        spec=client.V1PodSpec(containers=[container],
                              affinity=affinity,
                              volumes=volumes,
                              restart_policy='Never'))
    job = client.V1Job(metadata=client.V1ObjectMeta(name=job_name),
                       spec=client.V1JobSpec(template=pod_template))
    return (job)
def create_job_object():
    """Build the benchmark V1Job (one container computing fib(35))."""
    env = [
        client.V1EnvVar(name='JOB_TYPE', value=JOB_TYPE),
        client.V1EnvVar(name='FIB_N', value='35'),
    ]
    storage_mount = client.V1VolumeMount(mount_path='/mnt/storage',
                                         name='storage')
    resources = client.V1ResourceRequirements(
        requests={'memory': '64Mi', 'cpu': '250m'},
        limits={'memory': '128Mi', 'cpu': '500m'})
    worker = client.V1Container(name=CONTAINER_NAME,
                                image=CONTAINER_IMAGE,
                                env=env,
                                volume_mounts=[storage_mount],
                                resources=resources)
    # Host-path volume backing the /mnt/storage mount (minikube setup).
    storage_volume = client.V1Volume(name='storage',
                                     host_path={'path': '/c/minikube-pv'})
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "pi"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              containers=[worker],
                              volumes=[storage_volume]))
    return client.V1Job(api_version="batch/v1",
                        kind="Job",
                        metadata=client.V1ObjectMeta(name=JOB_NAME),
                        spec=client.V1JobSpec(template=pod_template,
                                              backoff_limit=1))
def create_resource_job(resource_path, resourcejobimageid, kubeconfigpath,
                        resourceid, state_store, code_type, serviceid,
                        versionid, versionname, namespace):
    """Launch a one-shot batch Job ("resourcejob<resourceid>") deploying a resource.

    The job pod mounts the host docker socket dir (/var/run) and the
    kubeconfig, then runs ``app.py`` with the service/version parameters.

    Returns:
        ("success", message, job-status) on submission, or
        ("error", message, error-text) if anything raises.

    Fix: corrected the "Intitated"/"Intitate" typos in the returned
    status messages.
    """
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        # /var/run is mounted so the job can talk to the host docker daemon.
        volume1 = client.V1Volume(name="buildjob" + resourceid,
                                  host_path={"path": "/var/run"})
        volume2 = client.V1Volume(name="kubeconfig",
                                  host_path={"path": kubeconfigpath})
        mount1 = client.V1VolumeMount(name="buildjob" + resourceid,
                                      mount_path="/var/run")
        mount2 = client.V1VolumeMount(name="kubeconfig",
                                      mount_path="/home/app/web/kubeconfig")
        container = client.V1Container(
            name="resourcejob" + resourceid,
            image=resourcejobimageid,
            volume_mounts=[mount1, mount2],
            command=[
                "python3", "-u", "app.py", serviceid, versionid, resourceid,
                versionname, namespace
            ],
            env=[{
                "name": "state_store",
                "value": state_store
            }],
            image_pull_policy="Always")
        # Never restart: a failed deploy should surface as a failed Job.
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"resourcejob": "resourcejob" + resourceid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume1, volume2]))
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="resourcejob" + resourceid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(body=job,
                                                      namespace=namespace)
        success_message = resourceid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = resourceid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))
def deploy(self,
           image,
           name,
           ns,
           port,
           replicas=1,
           svc_type="NodePort",
           traffic_policy="Local",
           cluster_ip=None,
           ipv6=False):
    """
    Creates a deployment and corresponding service with the given parameters.
    """
    # <replicas> copies of <image>, pods labelled "app": <name>.
    app_container = client.V1Container(
        name=name,
        image=image,
        ports=[client.V1ContainerPort(container_port=port)])
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": name}),
        spec=client.V1PodSpec(containers=[app_container, ]))
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=name),
        spec=client.V1DeploymentSpec(
            replicas=replicas,
            selector={'matchLabels': {'app': name}},
            template=pod_template))

    api_response = client.AppsV1Api().create_namespaced_deployment(
        body=deployment, namespace=ns)
    logger.debug("Deployment created. status='%s'" % str(api_response.status))

    # Expose the pods just created through a service of the same name.
    self.create_service(name, name, ns, port, svc_type, traffic_policy,
                        ipv6=ipv6)
def create_deployment(self,
                      node_name,
                      deployment_name,
                      pod_label,
                      image_name,
                      container_name,
                      cpu_requests=None,
                      cpu_limits=None,
                      container_port=7000):
    """Create a single-replica deployment pinned to *node_name*.

    NOTE(review): this targets the long-removed extensions/v1beta1 API,
    so it only works against old clusters/clients — confirm before reuse.
    cpu_requests / cpu_limits are accepted but currently unused (resource
    limits were left disabled in the original implementation).
    """
    # Load config from default location
    config.load_kube_config()
    extension = client.ExtensionsV1beta1Api()

    app_container = client.V1Container(
        name=container_name,
        image=image_name,
        image_pull_policy='IfNotPresent',
        ports=[client.V1ContainerPort(container_port=container_port)],
        tty=True,
        stdin=True)

    # Pod template pinned to the requested node.
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": pod_label}),
        spec=client.V1PodSpec(node_name=node_name,
                              containers=[app_container]))

    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=1,
        selector=client.V1LabelSelector(match_labels={'app': pod_label}),
        template=pod_template)

    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=deploy_spec)

    extension.create_namespaced_deployment(namespace="default",
                                           body=deployment)
def init_pod(namespace, pod_name, image, image_name, ports, command=None):
    """Build (but do not submit) a V1Pod with one container.

    *ports* is a list of "host:container" strings; each is split and
    turned into a TCP V1ContainerPort.

    Fix: the *command* argument was accepted but never forwarded to the
    container; it is now passed through (the default ``None`` leaves the
    image's entrypoint unchanged, so existing callers see no difference).
    """
    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=pod_name, namespace=namespace)
    container_ports = []
    for port in ports:
        host_port, container_port = port.split(':')
        container_ports.append(
            V1ContainerPort(host_port=int(host_port),
                            container_port=int(container_port),
                            protocol='TCP'))
    container = client.V1Container(name=image_name,
                                   image=image,
                                   ports=container_ports,
                                   command=command)
    print(f'正在创建pod,主机名为{pod_name}')
    pod.spec = client.V1PodSpec(containers=[container], hostname=pod_name)
    return pod
def create_test_pod(name="test"):
    """Build a test V1Pod in the 'elasticdl' namespace.

    Fix: ``V1Container.command`` must be a list of strings; the original
    passed the bare string "bash".

    NOTE(review): names like "test_pod_name" contain underscores, which
    Kubernetes rejects in object names — confirm these pods are only used
    as in-memory fixtures and never submitted to a cluster.
    """
    container = client.V1Container(
        name=name + "_pod_name",
        image=name + "_image_name",
        command=["bash"],
    )
    # Pod
    spec = client.V1PodSpec(containers=[container])
    pod = client.V1Pod(
        spec=spec,
        metadata=client.V1ObjectMeta(
            name=name + "_pod_name", namespace="elasticdl"
        ),
    )
    return pod
def kube_resource_spec_to_pod_spec(
    kube_resource_spec: KubeResourceSpec, container: client.V1Container
):
    """Translate a KubeResourceSpec into a never-restarting V1PodSpec."""
    # The priority class is honoured only when valid names are configured.
    priority_class = (
        kube_resource_spec.priority_class_name
        if len(mlconf.get_valid_function_priority_class_names())
        else None
    )
    return client.V1PodSpec(
        containers=[container],
        restart_policy="Never",
        volumes=kube_resource_spec.volumes,
        service_account=kube_resource_spec.service_account,
        node_name=kube_resource_spec.node_name,
        node_selector=kube_resource_spec.node_selector,
        affinity=kube_resource_spec.affinity,
        priority_class_name=priority_class,
    )
def kube_resource_spec_to_pod_spec(kube_resource_spec: KubeResourceSpec,
                                   container: client.V1Container):
    """Translate a KubeResourceSpec into a never-restarting V1PodSpec."""
    affinity = kube_resource_spec.affinity
    # A raw dict (e.g. parsed from YAML) must first be converted into the
    # kubernetes client's class representation.
    if affinity and isinstance(affinity, dict):
        affinity = kube_resource_spec._get_affinity_as_k8s_class_instance()
    return client.V1PodSpec(
        containers=[container],
        restart_policy="Never",
        volumes=kube_resource_spec.volumes,
        service_account=kube_resource_spec.service_account,
        node_name=kube_resource_spec.node_name,
        node_selector=kube_resource_spec.node_selector,
        affinity=affinity,
    )
def generate_pod_spec(self, image_name, push):  # pylint:disable=arguments-differ
    """Build a one-shot V1PodSpec running kaniko to build *image_name*."""
    kaniko_args = [
        "--dockerfile=Dockerfile",
        "--destination={}".format(image_name),
        "--context={}".format(self.context_path),
    ]
    # Dry-run builds skip pushing the resulting image.
    if not push:
        kaniko_args.append("--no-push")
    kaniko = client.V1Container(
        name='kaniko',
        image=constants.KANIKO_IMAGE,
        args=kaniko_args,
    )
    return client.V1PodSpec(containers=[kaniko], restart_policy='Never')
def parse_creat(self):
    """Read pod definitions from ``file.json`` and create a pod per entry.

    NOTE(review): reconstructed from a collapsed one-line source; the
    nesting of the trailing statements relative to the inner ``for`` loop
    is a best reading — confirm against the original formatting.
    """
    # NOTE(review): the file handle is never closed; a with-block would be safer.
    data = json.load(open("file.json", "r"))
    for i in data:
        print(i["name"])
        # pod = client.V1Pod(labels={"app": i["name"]})
        pod = client.V1Pod()
        # Unique-ish pod name: "<image>-<unix timestamp>".
        pod.metadata = client.V1ObjectMeta(name=str(i["name"] + "-" +
                                                    str(time.time())))
        print(time.time())
        container = client.V1Container(image=i["name"], name=i["name"])
        spec = client.V1PodSpec(containers=[container])
        # spec.containers = [container]
        # NOTE(review): only the last node name survives this loop; if one pod
        # per node was intended, creation should move inside the loop — confirm.
        for node_name in i["nodes"]:
            spec.node_name = node_name
        pod.spec = spec
        self.core_v1.create_namespaced_pod(namespace="default", body=pod)
def create_sts_spec(node_name, workload_type):
    """Build a StatefulSet (initially 0 replicas) pinned to *node_name*.

    The single container runs the workload command for *workload_type*,
    with a liveness probe listing its mount point and a 4Gi longhorn
    volume-claim template.

    Fix: removed the stray no-op expression statement
    ``statefulset.spec.replicas`` that preceded the return.
    """
    sts_name = STS_PREFIX + node_name
    cmd = get_workload_command(workload_type, sts_name)
    container = client.V1Container(
        name=sts_name,
        image=IMAGE,
        command=["/bin/bash"],
        args=["-c", cmd],
        # Probe: the mount must stay listable for the pod to be healthy.
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["ls", "/mnt/" + sts_name]),
            initial_delay_seconds=5,
            period_seconds=5),
        volume_mounts=[
            client.V1VolumeMount(name=sts_name, mount_path="/mnt/" + sts_name)
        ])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": sts_name}),
        spec=client.V1PodSpec(
            node_name=node_name,
            restart_policy="Always",
            termination_grace_period_seconds=10,
            containers=[container],
        ))
    # Scaled up later by the caller; starts at 0 replicas.
    spec = client.V1StatefulSetSpec(
        replicas=0,
        service_name=sts_name,
        selector=client.V1LabelSelector(match_labels={"app": sts_name}),
        template=template,
        volume_claim_templates=[
            client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(name=sts_name),
                spec=client.V1PersistentVolumeClaimSpec(
                    access_modes=["ReadWriteOnce"],
                    storage_class_name="longhorn",
                    resources=client.V1ResourceRequirements(
                        requests={"storage": "4Gi"})))
        ])
    statefulset = client.V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=client.V1ObjectMeta(name=sts_name),
        spec=spec)
    return statefulset
def create_pod(self):
    """Create this replica's pod in the cluster namespace.

    Builds a V1Pod from the first container of ``self.template``,
    overlaying env vars and volume mounts from ``self.container_params``,
    submits it via the namespaced-pod API, and — for a SCHEDULER replica —
    polls until the pod appears so its IP can be recorded.
    """
    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=self.replica_name,
                                       labels={
                                           "pod_name": self.replica_name,
                                           "job_name": self.job_name
                                       })
    # Raw dict form of the first (and only) container in the job template.
    container_spec = self.template["spec"]["containers"][0]
    # add container properties to container spec
    if 'env' in self.container_params:
        # add env variables to container_spec (values coerced to str)
        container_spec['env'] = [
            client.V1EnvVar(name=k, value=str(v))
            for k, v in self.container_params['env'].items()
        ]
    if 'volumes' in self.container_params:
        # add volumes spec to container
        # TODO: look at how to mount a volume (volumeMounts vs volumeDevices?)
        container_spec['volumeMounts'] = []
    # V1Container accepts keys defined in V1Container.attribute_map.
    # The current spec is defined with camel case keys, so we need to map back
    # to underscore keys.
    inv_map = {
        k: container_spec[v]
        for k, v in client.V1Container.attribute_map.items()
        if v in container_spec
    }
    pod_spec = client.V1PodSpec(containers=[client.V1Container(**inv_map)],
                                hostname=self.replica_name)
    pod.spec = pod_spec
    self.api_instance.create_namespaced_pod(namespace=settings.NAMESPACE,
                                            body=pod)
    logger.info(
        f"Created Pod for replica {self.replica_name} with container spec {inv_map}"
    )
    # TODO: Need to solve the Hostname resolve. Now only solution is to use direct IP address.
    # if scheduler, we need to get scheduler IP address and inject it in the other pods.
    if self.replica_type == "SCHEDULER":
        selector = f"pod_name={self.replica_name}"
        pod_list = client.models.v1_pod_list.V1PodList(items=[])
        # NOTE(review): busy-waits with no sleep between list calls — consider
        # adding a delay/timeout before reusing this in production.
        while len(pod_list.items) == 0:
            pod_list = self.api_instance.list_namespaced_pod(
                settings.NAMESPACE, label_selector=selector)
        self.scheduler_ip = pod_list.items[0].status.pod_ip
def export_deployment(self):
    """Build an extensions/v1beta1 Deployment for this project's service."""
    log_mount = client.V1VolumeMount(mount_path='/opt/logs', name='logs')
    # TCP liveness probe against the first declared container port.
    tcp_check = client.V1Probe(
        initial_delay_seconds=5,
        tcp_socket=client.V1TCPSocketAction(
            port=int(self.container_port[0])))
    app_container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[
            client.V1ContainerPort(container_port=int(p))
            for p in self.container_port
        ],
        image_pull_policy='Always',
        env=[
            client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
            client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
        ],
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=[log_mount],
        liveness_probe=tcp_check)

    pull_secret = client.V1LocalObjectReference('registrysecret')
    log_volume = client.V1Volume(
        name='logs',
        host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(containers=[app_container],
                              image_pull_secrets=[pull_secret],
                              volumes=[log_volume]))

    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=pod_template,
        selector=client.V1LabelSelector(
            match_labels={"project": self.dm_name}),
        min_ready_seconds=3)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=deploy_spec)
def create_job_object(runner_image, region, s3_path, pvc_name):
    """Build a V1Job that `aws s3 sync`s *s3_path* onto the given PVC."""
    target_folder = get_target_folder(s3_path)
    # AWS credentials come from the 'aws-secret' secret; region is explicit.
    aws_env = [
        k8s_client.V1EnvVar(name="AWS_REGION", value=region),
        k8s_client.V1EnvVar(
            name="AWS_ACCESS_KEY_ID",
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
        k8s_client.V1EnvVar(
            name="AWS_SECRET_ACCESS_KEY",
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    key="AWS_SECRET_ACCESS_KEY", name="aws-secret"))),
    ]
    sync_container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[
            k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')
        ],
        env=aws_env,
    )
    data_volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name))
    pod_template = k8s_client.V1PodTemplateSpec(
        spec=k8s_client.V1PodSpec(containers=[sync_container],
                                  volumes=[data_volume],
                                  restart_policy="OnFailure"))
    return k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=sync_container.name),
        spec=k8s_client.V1JobSpec(template=pod_template))
def create_stateful_set(self, stsName, namespace, replicas, containers,
                        volumes, volumeClaimTemplates):
    """Create a StatefulSet named *stsName* in *namespace*.

    Returns ``hiss.hiss(...)`` on API failure, otherwise None (the
    API response is only printed).
    """
    # Selector and pod-template labels share the same name/namespace pair.
    common_labels = {'name': stsName, 'namespace': namespace}
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=dict(common_labels)),
        spec=client.V1PodSpec(containers=containers, volumes=volumes))
    sts_spec = client.V1StatefulSetSpec(
        service_name=stsName,
        replicas=replicas,
        selector=client.V1LabelSelector(match_labels=dict(common_labels)),
        template=pod_template,
        volume_claim_templates=volumeClaimTemplates)
    body = client.V1StatefulSet(
        api_version='apps/v1',
        kind='StatefulSet',
        metadata=client.V1ObjectMeta(name=stsName, namespace=namespace),
        spec=sts_spec)
    try:
        api_response = self.appsApi.create_namespaced_stateful_set(
            namespace=namespace, body=body)
        print('api_response: ', api_response)
    except ApiException as e:
        return hiss.hiss(
            "Exception when calling AppsV1Api->create_namespaced_stateful_set: %s\n"
            % e)
def create_pod_object(self):
    """Build the AutoML worker V1Pod (generated name prefix 'automl-')."""
    config.load_incluster_config()
    automl = client.V1Container(
        name="eddy-automl",
        image="eddyanalytics/eddy-automl",
        ports=[client.V1ContainerPort(container_port=8888)],
        env=[V1EnvVar("BOOTSTRAP_SERVER", kafka_address)],
        command=["python", "main.py"],
        args=[self.input_topic, self.output_topic, str(self.target_col)])
    meta = client.V1ObjectMeta(generate_name="automl-", namespace=namespace)
    return client.V1Pod(kind="Pod",
                        spec=client.V1PodSpec(containers=[automl]),
                        metadata=meta)
def pod_spec(self):
    """V1PodSpec with one local-image container exposing port 5000."""
    app_container = client.V1Container(
        name=self.kube_name,
        image=self.raw_name,
        ports=[client.V1ContainerPort(container_port=5000)],
        # 'Never': the image is expected to exist locally already.
        image_pull_policy='Never',
        resources=client.V1ResourceRequirements(
            requests={'cpu': '100m'}),
    )
    return client.V1PodSpec(containers=[app_container])
def create_pod(id):
    """Create pod ``job-consumer-<id>`` in the default namespace."""
    config.load_incluster_config()
    core_v1 = client.CoreV1Api()
    consumer = client.V1Container(name="job-consumer-" + str(id),
                                  image="job-consumer:latest",
                                  # local image only; never pull
                                  image_pull_policy="Never")
    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name="job-consumer-" + str(id))
    pod.spec = client.V1PodSpec(containers=[consumer],
                                restart_policy="Always")
    return core_v1.create_namespaced_pod(namespace="default", body=pod)
def test_pod_resources_pod_without_node(self) -> None:
    """A container with no limits/requests yields inf limits, 0 requests."""
    bare_container = client.V1Container(
        name="non_scheduled_container",
        resources=client.V1ResourceRequirements(limits=None, requests=None),
    )
    pod = client.V1Pod(spec=client.V1PodSpec(containers=[bare_container]))
    expected = api.PodUsageResources(
        cpu=api.Resources(limit=float("inf"), requests=0.0),
        memory=api.Resources(limit=float("inf"), requests=0.0),
    )
    self.assertEqual(pod_resources(pod), expected)