def _volume_mounts(kube_manager, pod_spec, namespace):  # pylint:disable=unused-argument
    # mount_name, mount_path, sub_path, volume_type and volume_name are
    # expected to be provided by the enclosing closure.
    volume_mount = client.V1VolumeMount(
        name=mount_name, mount_path=mount_path, sub_path=sub_path)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    if volume_type == 'pvc':
        volume = client.V1Volume(
            name=mount_name,
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=volume_name))
    elif volume_type == 'secret':
        volume = client.V1Volume(
            name=mount_name,
            secret=client.V1SecretVolumeSource(secret_name=volume_name))
    elif volume_type == 'config_map':
        volume = client.V1Volume(
            name=mount_name,
            config_map=client.V1ConfigMapVolumeSource(name=volume_name))
    else:
        raise RuntimeError("Unsupported type %s" % volume_type)

    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
def _mount_v3iod(task):
    from kubernetes import client as k8s_client

    def add_vol(name, mount_path, host_path):
        vol = k8s_client.V1Volume(
            name=name,
            host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=''),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path=mount_path, name=name)
        )

    # namespace and v3io_config_configmap are expected from the enclosing closure.
    add_vol(name='shm', mount_path='/dev/shm', host_path='/dev/shm/' + namespace)
    add_vol(
        name='v3iod-comm',
        mount_path='/var/run/iguazio/dayman',
        host_path='/var/run/iguazio/dayman/' + namespace,
    )

    vol = k8s_client.V1Volume(
        name='daemon-health', empty_dir=k8s_client.V1EmptyDirVolumeSource()
    )
    task.add_volume(vol).add_volume_mount(
        k8s_client.V1VolumeMount(
            mount_path='/var/run/iguazio/daemon_health', name='daemon-health'
        )
    )

    vol = k8s_client.V1Volume(
        name='v3io-config',
        config_map=k8s_client.V1ConfigMapVolumeSource(
            name=v3io_config_configmap, default_mode=420
        ),
    )
    task.add_volume(vol).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/etc/config/v3io', name='v3io-config')
    )

    # vol = k8s_client.V1Volume(name='v3io-auth',
    #                           secret=k8s_client.V1SecretVolumeSource(secret_name=v3io_auth_secret,
    #                                                                  default_mode=420))
    # task.add_volume(vol).add_volume_mount(k8s_client.V1VolumeMount(mount_path='/igz/.igz', name='v3io-auth'))

    task.add_env_variable(
        k8s_client.V1EnvVar(
            name='CURRENT_NODE_IP',
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    api_version='v1', field_path='status.hostIP'
                )
            ),
        )
    )
    task.add_env_variable(
        k8s_client.V1EnvVar(
            name='IGZ_DATA_CONFIG_FILE', value='/igz/java/conf/v3io.conf'
        )
    )
    return task
def attach_output_volume(op):
    """Attaches emptyDir volumes to container operations.

    See https://github.com/kubeflow/pipelines/issues/1654
    """
    # Handle auto-generated pipeline metadata
    op.output_artifact_paths = {
        'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
        'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
    }

    # Add somewhere to store regular output
    op.add_volume(
        k8s_client.V1Volume(name='volume',
                            empty_dir=k8s_client.V1EmptyDirVolumeSource()))
    op.container.add_volume_mount(
        k8s_client.V1VolumeMount(name='volume', mount_path='/output'))

    # func_to_container_op wants to store outputs under /tmp/outputs
    op.add_volume(
        k8s_client.V1Volume(name='outputs',
                            empty_dir=k8s_client.V1EmptyDirVolumeSource()))
    op.container.add_volume_mount(
        k8s_client.V1VolumeMount(name='outputs', mount_path='/tmp/outputs'))

    return op
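# Hedged usage sketch (not from the source): attach_output_volume has the shape
# of a KFP op transformer, so it can be applied to every op in a pipeline via
# the pipeline conf. The image and command below are illustrative only.
import kfp
from kfp import dsl

@dsl.pipeline(name='example-pipeline')
def example_pipeline():
    dsl.get_pipeline_conf().add_op_transformer(attach_output_volume)
    dsl.ContainerOp(name='echo', image='alpine:3.18',
                    command=['sh', '-c', 'echo hello > /output/result.txt'])

kfp.compiler.Compiler().compile(example_pipeline, 'example_pipeline.yaml')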
def submit(self):
    """Submit an image spec to OpenShift's s2i and wait for completion"""
    volume_mounts = [
        client.V1VolumeMount(mount_path="/var/run/docker.sock",
                             name="docker-socket")
    ]
    volumes = [
        client.V1Volume(name="docker-socket",
                        host_path=client.V1HostPathVolumeSource(
                            path="/var/run/docker.sock"))
    ]

    if self.push_secret:
        volume_mounts.append(
            client.V1VolumeMount(mount_path="/root/.docker",
                                 name='docker-push-secret'))
        volumes.append(
            client.V1Volume(name='docker-push-secret',
                            secret=client.V1SecretVolumeSource(
                                secret_name=self.push_secret)))

    self.pod = client.V1Pod(
        metadata=client.V1ObjectMeta(name=self.name,
                                     labels={"name": self.name}),
        spec=client.V1PodSpec(
            containers=[
                client.V1Container(
                    image=self.builder_image,
                    name="builder",
                    args=self.get_cmd(),
                    image_pull_policy='Always',
                    volume_mounts=volume_mounts,
                )
            ],
            volumes=volumes,
            restart_policy="Never"))

    try:
        ret = self.api.create_namespaced_pod(self.namespace, self.pod)
    except client.rest.ApiException as e:
        if e.status == 409:
            # Someone else created it!
            pass
        else:
            raise

    w = watch.Watch()
    try:
        for f in w.stream(self.api.list_namespaced_pod,
                          self.namespace,
                          label_selector="name={}".format(self.name)):
            if f['type'] == 'DELETED':
                self.progress('pod.phasechange', 'Deleted')
                return
            self.pod = f['object']
            self.progress('pod.phasechange', self.pod.status.phase)
            if self.pod.status.phase == 'Succeeded':
                self.cleanup()
            elif self.pod.status.phase == 'Failed':
                self.cleanup()
    finally:
        w.stop()
def k8s_object(self):
    depl = client.AppsV1beta1Deployment(
        metadata=client.V1ObjectMeta(name=self.name, labels=self.labels),
        spec=client.AppsV1beta1DeploymentSpec(
            strategy=client.AppsV1beta1DeploymentStrategy(
                type='RollingUpdate',
                rolling_update=client.AppsV1beta1RollingUpdateDeployment(
                    max_surge=0)),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels=self.template_labels),
                spec=client.V1PodSpec(
                    affinity=client.V1Affinity(
                        pod_anti_affinity=client.V1PodAntiAffinity(
                            required_during_scheduling_ignored_during_execution=[
                                {"topologyKey": e2e_globals.ANTI_AFFINITY_KEY},
                            ]),
                    ),
                    volumes=[client.V1Volume(
                        name='data',
                        config_map=client.V1ConfigMapVolumeSource(
                            name=self.cfgmap_name))],
                    containers=[client.V1Container(
                        image=e2e_globals.TEST_DEPLOYMENT_IMAGE,
                        name="testapp",
                        volume_mounts=[client.V1VolumeMount(
                            name='data',
                            mount_path='/usr/share/nginx/html')],
                        ports=[client.V1ContainerPort(
                            container_port=e2e_globals.TEST_CONTAINER_PORT)],
                        resources=client.V1ResourceRequirements(
                            requests={
                                'cpu': '1m',
                                'memory': '1Mi',
                            },
                        ),
                    )])),
            replicas=self.replicas))

    if self.vol_claim_name is not None:
        volume = client.V1Volume(
            name='test-volume',
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=self.vol_claim_name))
        mount = client.V1VolumeMount(name='test-volume', mount_path='/usr/blank')
        depl.spec.template.spec.containers[0].volume_mounts.append(mount)
        depl.spec.template.spec.volumes.append(volume)
    return depl
def create_generator_job(kubeconfigpath, serviceid, namespace, archeplaydatapath, generatorpath):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume1 = client.V1Volume(
            name="generatorjob" + serviceid,
            host_path={"path": "/var/run"}
        )
        volume2 = client.V1Volume(
            name="kubeconfig",
            host_path={"path": kubeconfigpath}
        )
        volume3 = client.V1Volume(
            name="archeplaydata",
            host_path={"path": archeplaydatapath}
        )
        mount1 = client.V1VolumeMount(
            name="generatorjob" + serviceid,
            mount_path="/var/run"
        )
        mount2 = client.V1VolumeMount(
            name="kubeconfig",
            mount_path="/home/app/web/kubeconfig"
        )
        mount3 = client.V1VolumeMount(
            name="archeplaydata",
            mount_path="/home/app/web/archeplay/data"
        )
        container = client.V1Container(
            name="generatorjob" + serviceid,
            image="python:3.8.1-slim-buster",
            volume_mounts=[mount1, mount2, mount3],
            command=["bash", generatorpath],
            image_pull_policy="Always"
        )
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"generatorjob": "generatorjob" + serviceid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume1, volume2, volume3]))
        # Create the specification of the job
        spec = client.V1JobSpec(
            template=template,
            backoff_limit=0
        )
        # Instantiate the job object
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="generatorjob" + serviceid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(
            body=job,
            namespace=namespace)
        success_message = "Generator Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = "Generator Job Failed to Initiate"
        return ("error", error_message, str(Error))
def dispatch(endpoint, access_key, secret_key, bucket_name, object_name):
    job_name = f"local-rebuild-{uuid.uuid1()}"

    downloader_env = [
        client.V1EnvVar(name="ENDPOINT", value=endpoint),
        client.V1EnvVar(name="ACCESS_KEY", value=access_key),
        client.V1EnvVar(name="SECRET_KEY", value=secret_key),
        client.V1EnvVar(name="BUCKET_NAME", value=bucket_name),
        client.V1EnvVar(name="OBJECT_NAME", value=object_name)
    ]

    downloader_container = client.V1Container(
        name="downloader",
        image=os.getenv("DOWNLOADER_IMAGE"),
        env=downloader_env,
        volume_mounts=[client.V1VolumeMount(name="processor-input",
                                            mount_path="/output")])

    processor_container = client.V1Container(
        name="processor",
        image=os.getenv("PROCESSOR_IMAGE"),
        volume_mounts=[client.V1VolumeMount(name="processor-input",
                                            mount_path="/input",
                                            read_only=True),
                       client.V1VolumeMount(name="processor-output",
                                            mount_path="/output")])

    pod_spec = client.V1PodSpec(
        restart_policy="Never",
        init_containers=[downloader_container],
        containers=[processor_container],
        volumes=[
            client.V1Volume(name="processor-input",
                            empty_dir=client.V1EmptyDirVolumeSource()),
            client.V1Volume(name="processor-output",
                            empty_dir=client.V1EmptyDirVolumeSource()),
        ])

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=job_name,
                                     labels={"app": "local-rebuild-processor"}),
        spec=pod_spec)

    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=0)

    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=job_name,
                                     labels={"app": "local-rebuild-processor"}),
        spec=spec)

    client.BatchV1Api().create_namespaced_job(body=job, namespace="default")
def get_obj(self):
    """
    :description: Generate volume spec.
    """
    if self.config.get("configmap"):
        return client.V1Volume(
            name=self.slug,
            config_map=client.V1ConfigMapVolumeSource(
                name=self.config.get("configmap")))
    return client.V1Volume(name=self.slug,
                           empty_dir=client.V1EmptyDirVolumeSource())
def get_volume(volume, claim_name=None, volume_mount=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=claim_name)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)
    elif volume_mount:
        # volume_mount is interpreted here as a host path
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=volume_mount))
    else:
        empty_dir = client.V1EmptyDirVolumeSource()
        return client.V1Volume(name=volume, empty_dir=empty_dir)
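# Hedged usage sketch (illustrative names): get_volume picks the volume source
# based on which keyword argument is supplied.
pvc_vol = get_volume("data", claim_name="my-pvc")            # PVC-backed volume
host_vol = get_volume("logs", volume_mount="/var/log/app")   # hostPath volume
scratch_vol = get_volume("scratch")                          # emptyDir volume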
def get_gpu_volumes():
    return [
        client.V1Volume(name='nvidia-bin',
                        host_path=client.V1HostPathVolumeSource(
                            path=settings.DIRS_NVIDIA.get('bin'))),
        client.V1Volume(name='nvidia-lib',
                        host_path=client.V1HostPathVolumeSource(
                            path=settings.DIRS_NVIDIA.get('lib'))),
        client.V1Volume(name='nvidia-libcuda',
                        host_path=client.V1HostPathVolumeSource(
                            path=settings.DIRS_NVIDIA.get('libcuda'))),
    ]
def create_job_object(job_type, fib_n):
    # Configure env variables
    envs = [
        client.V1EnvVar(name='JOB_TYPE', value=job_type),
        client.V1EnvVar(name='FIB_N', value=fib_n)
    ]
    # Configure volume mounts
    volume_mount = client.V1VolumeMount(mount_path='/mnt/storage', name='storage')
    # Configure resource requests and limits
    resources = client.V1ResourceRequirements(
        requests={'memory': '64Mi', 'cpu': '250m'},
        limits={'memory': '128Mi', 'cpu': '500m'})
    # Configure Pod template container
    container = client.V1Container(name=CONTAINER_NAME,
                                   image=CONTAINER_IMAGE,
                                   env=envs,
                                   volume_mounts=[volume_mount],
                                   resources=resources)
    # Configure volume template
    if job_type == JOB_STATEFUL:
        volume = client.V1Volume(name='storage',
                                 host_path={'path': '/c/minikube-pv'})
    else:
        volume = client.V1Volume(name='storage', empty_dir={})
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "job-app"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              containers=[container],
                              volumes=[volume]))
    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=1)
    # Instantiate the job object
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(generate_name=JOB_NAME),
                       spec=spec)
    return job
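# Hedged usage sketch (illustrative): CONTAINER_NAME, CONTAINER_IMAGE, JOB_NAME
# and JOB_STATEFUL are module-level constants from the surrounding code; the
# submission below assumes a reachable cluster and a "default" namespace.
from kubernetes import client, config

config.load_kube_config()
job = create_job_object(job_type="stateless", fib_n="30")
client.BatchV1Api().create_namespaced_job(body=job, namespace="default")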
def get_gpu_volumes():
    dirs_nvidia = conf.get(DIRS_NVIDIA)
    return [
        client.V1Volume(
            name='nvidia-bin',
            host_path=client.V1HostPathVolumeSource(path=dirs_nvidia.get('bin'))),
        client.V1Volume(
            name='nvidia-lib',
            host_path=client.V1HostPathVolumeSource(path=dirs_nvidia.get('lib'))),
        client.V1Volume(
            name='nvidia-libcuda',
            host_path=client.V1HostPathVolumeSource(path=dirs_nvidia.get('libcuda'))),
    ]
def get_volume(volume, claim_name=None, host_path=None, read_only=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=claim_name, read_only=read_only)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)
    if host_path:
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=host_path))
    empty_dir = client.V1EmptyDirVolumeSource()
    return client.V1Volume(name=volume, empty_dir=empty_dir)
def _mount_v3iod(task):
    from kubernetes import client as k8s_client

    def add_vol(name, mount_path, host_path):
        vol = k8s_client.V1Volume(
            name=name,
            host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=""),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

    # namespace and v3io_config_configmap are expected from the enclosing closure.
    add_vol(name="shm", mount_path="/dev/shm", host_path="/dev/shm/" + namespace)
    add_vol(
        name="v3iod-comm",
        mount_path="/var/run/iguazio/dayman",
        host_path="/var/run/iguazio/dayman/" + namespace,
    )

    vol = k8s_client.V1Volume(
        name="daemon-health", empty_dir=k8s_client.V1EmptyDirVolumeSource())
    task.add_volume(vol).add_volume_mount(
        k8s_client.V1VolumeMount(
            mount_path="/var/run/iguazio/daemon_health", name="daemon-health"))

    vol = k8s_client.V1Volume(
        name="v3io-config",
        config_map=k8s_client.V1ConfigMapVolumeSource(
            name=v3io_config_configmap, default_mode=420),
    )
    task.add_volume(vol).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path="/etc/config/v3io", name="v3io-config"))

    task.add_env_variable(
        k8s_client.V1EnvVar(
            name="CURRENT_NODE_IP",
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    api_version="v1", field_path="status.hostIP")),
        ))
    task.add_env_variable(
        k8s_client.V1EnvVar(name="IGZ_DATA_CONFIG_FILE",
                            value="/igz/java/conf/v3io.conf"))
    return task
def create_resource_job(resource_path, resourcejobimageid, kubeconfigpath, resourceid,
                        state_store, code_type, serviceid, versionid, versionname,
                        namespace):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume1 = client.V1Volume(name="buildjob" + resourceid,
                                  host_path={"path": "/var/run"})
        volume2 = client.V1Volume(name="kubeconfig",
                                  host_path={"path": kubeconfigpath})
        mount1 = client.V1VolumeMount(name="buildjob" + resourceid,
                                      mount_path="/var/run")
        mount2 = client.V1VolumeMount(name="kubeconfig",
                                      mount_path="/home/app/web/kubeconfig")
        container = client.V1Container(
            name="resourcejob" + resourceid,
            image=resourcejobimageid,
            volume_mounts=[mount1, mount2],
            command=["python3", "-u", "app.py", serviceid, versionid,
                     resourceid, versionname, namespace],
            env=[{"name": "state_store", "value": state_store}],
            image_pull_policy="Always")
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"resourcejob": "resourcejob" + resourceid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume1, volume2]))
        # Create the specification of the job
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        # Instantiate the job object
        job = client.V1Job(api_version="batch/v1",
                           kind="Job",
                           metadata=client.V1ObjectMeta(name="resourcejob" + resourceid),
                           spec=spec)
        api_response = batch_v1.create_namespaced_job(body=job, namespace=namespace)
        success_message = resourceid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = resourceid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))
def demo():
    '''
    data_path = "/home/newnfs/hyperai_data/Foundation/AID",
    user_name = "admin", data_job_id = "20191203-1951-data",
    model_job_id = "20191203-1951-model", var_job_id = "20191203-1951-var",
    lr = 0.0010000000475, epoch = 2520, batch_size = 8
    :return:
    '''
    detection_data_path = "/root/aircraft"
    user_name = "admin"
    detection_job_id = "20200514-detection"

    data = ReadyData(detection_data_path, detection_job_id, user_name) \
        .add_volume(k8s_client.V1Volume(
            name='nfs-storage',
            host_path=k8s_client.V1HostPathVolumeSource(path="/home/newnfs"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/newnfs", name='nfs-storage')) \
        .add_node_selector_constraint('beta.kubernetes.io/arch', 'amd64')

    camb = CambARMExecute(data.output, detection_job_id, user_name) \
        .add_volume(k8s_client.V1Volume(
            name='nfs-storage',
            host_path=k8s_client.V1HostPathVolumeSource(path="/home/newnfs"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/newnfs", name='nfs-storage')) \
        .add_volume(k8s_client.V1Volume(
            name='aaa',
            host_path=k8s_client.V1HostPathVolumeSource(path="/sys/kernel/debug"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/sys/kernel/debug", name='aaa')) \
        .add_volume(k8s_client.V1Volume(
            name='bbb',
            host_path=k8s_client.V1HostPathVolumeSource(path="/tmp/.X11-unix"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/tmp/.X11-unix", name='bbb')) \
        .add_volume(k8s_client.V1Volume(
            name='ccc',
            host_path=k8s_client.V1HostPathVolumeSource(
                path="/mnt/xfs/project/camb/v8.2_arm"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/Cambricon-Test-v8.2_arm", name='ccc')) \
        .add_volume(k8s_client.V1Volume(
            name='ddd',
            host_path=k8s_client.V1HostPathVolumeSource(
                path="/mnt/xfs/project/camb/arm_v8.0/v8.0_arm/ARM64-v8.0/arm64/congcan"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/congcan", name='ddd')) \
        .add_volume(k8s_client.V1Volume(
            name='eee',
            host_path=k8s_client.V1HostPathVolumeSource(
                path="/mnt/xfs/project/camb/v8.0/Cambricon-MLU100/datasets"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/datasets", name='eee')) \
        .add_volume(k8s_client.V1Volume(
            name='fff',
            host_path=k8s_client.V1HostPathVolumeSource(
                path="/mnt/xfs/project/camb/v8.0/Cambricon-MLU100/models"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path="/home/models", name='fff')) \
        .add_node_selector_constraint('beta.kubernetes.io/arch', 'arm64')

    camb.add_resource_limit("cambricon.com/mlu", "1")

    # Mount the device driver from the node
    device_name = "dev-cambricon"
    camb.add_volume(k8s_client.V1Volume(
        name=device_name,
        host_path=k8s_client.V1HostPathVolumeSource(
            path="/dev/cambricon_c10Dev0"))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            name=device_name, mount_path="/dev/cambricon_c10Dev0")) \
        .add_node_selector_constraint('beta.kubernetes.io/arch', 'arm64')
def create_job_object(job_arguments, size, docker_image, docker_image_tag, affinity):
    user = os.environ['USER']
    job = client.V1Job(
        metadata=client.V1ObjectMeta(
            name='kaml-remote-{}-{}'.format(user, uuid.uuid1())),
        spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                name='kaml-remote-{}-{}'.format(user, uuid.uuid1()),
                labels={'type': size}),
            spec=client.V1PodSpec(
                containers=[
                    client.V1Container(
                        name='kaml-remote',
                        args=job_arguments,
                        image='{}:{}'.format(docker_image, docker_image_tag),
                        image_pull_policy='Always',
                        env=[client.V1EnvVar(name='KAML_HOME', value='/app')],
                        volume_mounts=[
                            client.V1VolumeMount(name='kaml-cfg-volume',
                                                 read_only=True,
                                                 mount_path='/app/kaml.cfg',
                                                 sub_path='kaml.cfg'),
                            client.V1VolumeMount(name='gcp-service-account',
                                                 read_only=True,
                                                 mount_path='/app/service-key.json',
                                                 sub_path='service-key.json'),
                        ])
                ],
                affinity=affinity,
                volumes=[
                    client.V1Volume(
                        name='kaml-cfg-volume',
                        config_map=client.V1ConfigMapVolumeSource(name='kaml-cfg')),
                    client.V1Volume(
                        name='gcp-service-account',
                        secret=client.V1SecretVolumeSource(
                            secret_name='gcp-service-account',
                            items=[
                                client.V1KeyToPath(key='service-key.json',
                                                   path='service-key.json')
                            ]))
                ],
                restart_policy='Never'))))

    return job
def create_table_job(table_path, tablejobimageid, kubeconfigpath, dbid, namespace,
                     dbtype, tableid, Region, archeplaydatapath):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume2 = client.V1Volume(name="kubeconfig",
                                  host_path={"path": kubeconfigpath})
        volume3 = client.V1Volume(name="archeplaydata",
                                  host_path={"path": archeplaydatapath})
        mount2 = client.V1VolumeMount(name="kubeconfig",
                                      mount_path="/home/app/web/kubeconfig")
        mount3 = client.V1VolumeMount(name="archeplaydata",
                                      mount_path="/home/app/web/archeplay/data")
        container = client.V1Container(
            name="tablejob" + tableid,
            image=tablejobimageid,
            volume_mounts=[mount2, mount3],
            command=["python", "-u", "app.py", table_path, dbid, tableid, Region],
            env=[{"name": "archeplaydatapath",
                  "value": "/home/app/web/archeplay/data"}],
            image_pull_policy="Always")
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"tablejob": "tablejob" + tableid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume2, volume3]))
        # Create the specification of the job
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        # Instantiate the job object
        job = client.V1Job(api_version="batch/v1",
                           kind="Job",
                           metadata=client.V1ObjectMeta(name="tablejob" + tableid),
                           spec=spec)
        api_response = batch_v1.create_namespaced_job(body=job, namespace=namespace)
        success_message = tableid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = tableid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))
def _use_gcp_secret(task):
    from kubernetes import client as k8s_client
    # volume_name, secret_name, secret_volume_mount_path and
    # secret_file_path_in_volume are provided by the enclosing closure.
    task = task.add_volume(
        k8s_client.V1Volume(
            name=volume_name,
            secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name)))
    task.container \
        .add_volume_mount(
            k8s_client.V1VolumeMount(
                name=volume_name,
                mount_path=secret_volume_mount_path,
            )
        ) \
        .add_env_variable(
            k8s_client.V1EnvVar(
                name='GOOGLE_APPLICATION_CREDENTIALS',
                value=secret_volume_mount_path + secret_file_path_in_volume,
            )
        ) \
        .add_env_variable(
            k8s_client.V1EnvVar(
                name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                value=secret_volume_mount_path + secret_file_path_in_volume,
            )
        )
    # Set gcloud credentials by using the env var override.
    # TODO: Is there a better way for GCloud to pick up the credential?
    return task
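# Hedged sketch (assumption, not from the source): _use_gcp_secret closes over
# its configuration, in the style of kfp.gcp.use_gcp_secret. A minimal factory
# wrapping it might look like this; the defaults below are illustrative.
def use_gcp_secret(secret_name='user-gcp-sa',
                   secret_file_path_in_volume='/user-gcp-sa.json',
                   volume_name='gcp-credentials',
                   secret_volume_mount_path='/secret/gcp-credentials'):
    def _use_gcp_secret(task):
        ...  # body as defined above, closing over the arguments
        return task
    return _use_gcp_secret

# Applied to a pipeline op: some_op.apply(use_gcp_secret('user-gcp-sa'))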
def create_job(self, name, image, cmd, path):
    container = client.V1Container(
        name=name,
        image=image,
        env=[client.V1EnvVar(name='PYTHONUNBUFFERED', value='0')],
        command=cmd,
        volume_mounts=[
            client.V1VolumeMount(
                name=name + "-volume",
                mount_path="/root",
            )
        ])
    volume = client.V1Volume(name=name + "-volume",
                             host_path=client.V1HostPathVolumeSource(path=path))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=name, labels={"user": self.user}),
        spec=client.V1PodSpec(
            restart_policy="Never",
            containers=[container],
            volumes=[volume],
        ))
    spec = client.V1JobSpec(template=template)
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=name),
                       spec=spec)
    client.BatchV1Api().create_namespaced_job(namespace=self.namespace, body=job)
def add_azure_credentials(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists(constants.AZURE_CREDS_SECRET_NAME, namespace):
        raise ValueError(
            "Unable to mount credentials: " +
            f"Secret {constants.AZURE_CREDS_SECRET_NAME} not found in namespace {namespace}"
        )

    # Set appropriate secrets and volumes to enable kubeflow-user service
    # account.
    logging.warning("Adding azure auth location")
    env_var = client.V1EnvVar(name='AZURE_AUTH_LOCATION',
                              value='/etc/secrets/azure-credentials.json')
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.append(env_var)
    else:
        pod_spec.containers[0].env = [env_var]

    volume_mount = client.V1VolumeMount(name='azure-credentials',
                                        mount_path='/etc/secrets',
                                        read_only=True)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
        name='azure-credentials',
        secret=client.V1SecretVolumeSource(
            secret_name=constants.AZURE_CREDS_SECRET_NAME))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
def dockerjson_pv(
        pull_secret, name=None, filename='.dockerconfigjson',
        project_to='/kaniko/.docker/config.json'):
    """ Creates V1Volume volume projection from kubernetes pull secret """
    from os.path import basename, dirname
    from kubernetes import client as k8sc
    if not name:
        from uuid import uuid1
        name = 'vol-' + str(uuid1())[:12]
    return k8sc.V1Volume(
        name=name,
        projected=k8sc.V1ProjectedVolumeSource(sources=[
            k8sc.V1VolumeProjection(
                secret=k8sc.V1SecretProjection(
                    name=pull_secret,
                    items=[k8sc.V1KeyToPath(key=filename, path=basename(project_to))]
                )
            )
        ])
    )
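# Hedged usage sketch (illustrative names): mount the projected pull-secret
# volume so the docker config lands at /kaniko/.docker/config.json in a kaniko pod.
from kubernetes import client as k8sc

vol = dockerjson_pv(pull_secret='my-registry-secret', name='docker-config')
kaniko = k8sc.V1Container(
    name='kaniko',
    image='gcr.io/kaniko-project/executor:latest',
    volume_mounts=[k8sc.V1VolumeMount(name='docker-config',
                                      mount_path='/kaniko/.docker')])
pod_spec = k8sc.V1PodSpec(containers=[kaniko], volumes=[vol],
                          restart_policy='Never')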
def mount_cfgmap(self, name, path="/config"):
    self.add_volume(
        client.V1Volume(
            name=name,
            config_map=client.V1ConfigMapVolumeSource(name=name)
        ),
        mount_path=path,
    )
def add_gcp_credentials(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists(constants.GCP_CREDS_SECRET_NAME, namespace):
        raise ValueError(
            'Unable to mount credentials: ' +
            'Secret user-gcp-sa not found in namespace {}'.format(namespace))

    # Set appropriate secrets and volumes to enable kubeflow-user service
    # account.
    env_var = client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                              value='/etc/secrets/user-gcp-sa.json')
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.append(env_var)
    else:
        pod_spec.containers[0].env = [env_var]

    volume_mount = client.V1VolumeMount(name='user-gcp-sa',
                                        mount_path='/etc/secrets',
                                        read_only=True)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
        name='user-gcp-sa',
        secret=client.V1SecretVolumeSource(secret_name='user-gcp-sa'))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
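# Hedged usage sketch (assumption): mutators like add_gcp_credentials are
# applied to a V1PodSpec before the pod or job is created. _Mgr below is a
# hypothetical stand-in for the real manager object, which only needs a
# secret_exists() method here.
class _Mgr:
    def secret_exists(self, name, namespace):
        return True

pod_spec = client.V1PodSpec(
    containers=[client.V1Container(name='train', image='python:3.9')])
add_gcp_credentials(_Mgr(), pod_spec, namespace='kubeflow')
# pod_spec now mounts the user-gcp-sa secret and sets GOOGLE_APPLICATION_CREDENTIALS.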
def create_job_object(self, job_name, container_image, args):
    volume_name = ""  # volume inside of which you put your service account
    google_app_credentials_path = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
    volume_mount = client.V1VolumeMount(
        mount_path='/'.join(google_app_credentials_path.split('/')[:-1]),
        name=volume_name)
    env = client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                          value=google_app_credentials_path)
    container = client.V1Container(name=job_name,
                                   image=container_image,
                                   args=args,
                                   volume_mounts=[volume_mount],
                                   env=[env],
                                   image_pull_policy="Always")
    volume = client.V1Volume(
        name=volume_name,
        secret=client.V1SecretVolumeSource(
            secret_name='<secret-where-you-put-the-service-account>'))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "sample"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              containers=[container],
                              volumes=[volume]))
    spec = client.V1JobSpec(template=template,
                            backoff_limit=3,
                            ttl_seconds_after_finished=60)
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=job_name),
                       spec=spec)
    return job
def __init__(self, name, storage_pvc):
    internal_volume_name = 'nfs-server-volume'
    mount_path = '/exports'
    ctr = client.V1Container(
        name=name,
        image=self.IMAGE,
        ports=[
            client.V1ContainerPort(name=k, container_port=v)
            for k, v in self.PORTS.items()
        ],
        security_context=client.V1SecurityContext(privileged=True),
        volume_mounts=[
            client.V1VolumeMount(mount_path=mount_path,
                                 name=internal_volume_name)
        ])
    volume_source = client.V1PersistentVolumeClaimVolumeSource(
        claim_name=storage_pvc)
    volume = client.V1Volume(name=internal_volume_name,
                             persistent_volume_claim=volume_source)
    pod_spec = client.V1PodSpec(containers=[ctr], volumes=[volume])
    pod_metadata = client.V1ObjectMeta(labels=self.LABELS)
    pod_template = client.V1PodTemplateSpec(metadata=pod_metadata,
                                            spec=pod_spec)
    rs_spec = client.V1ReplicaSetSpec(
        replicas=1,
        selector=client.V1LabelSelector(match_labels=self.LABELS),
        template=pod_template)
    metadata = client.V1ObjectMeta(name=name, labels=self.LABELS)
    super(NFSDeployment, self).__init__(metadata=metadata, spec=rs_spec)
def mount_secret(self, name, path='/secret', items=None):
    self.add_volume(
        client.V1Volume(
            name=name,
            secret=client.V1SecretVolumeSource(
                secret_name=name,
                items=items,
            )),
        mount_path=path)
def create_deployment_object(name):
    # Configure Pod template container
    container = client.V1Container(
        name="minecraft",
        image="openhack/minecraft-server:2.0",
        ports=[
            client.V1ContainerPort(container_port=25565),
            client.V1ContainerPort(container_port=25575)
        ],
        volume_mounts=[
            client.V1VolumeMount(name="volume", mount_path="/data")
        ],
        env=[client.V1EnvVar(name="EULA", value="true")])
    volumes = client.V1Volume(
        name="volume",
        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
            claim_name="azure-managed-disk"))
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": name}),
        spec=client.V1PodSpec(containers=[container], volumes=[volumes]))
    # Create the specification of the deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1, template=template)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=name),
        spec=spec)
    return deployment
def create_volume(volume_data):
    if "name" in volume_data:
        volume = client.V1Volume(name=volume_data["name"])

        # persistent claim
        if "persistentVolumeClaim" in volume_data:
            volume_pvc = volume_data["persistentVolumeClaim"]
            if "claimName" in volume_pvc:
                pvc = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_pvc["claimName"])
                volume.persistent_volume_claim = pvc

        # hostpath
        if "hostPath" in volume_data and "path" in volume_data["hostPath"]:
            host_path = client.V1HostPathVolumeSource(
                path=volume_data["hostPath"]["path"])
            if "type" in volume_data["hostPath"]:
                host_path.type = volume_data["hostPath"]["type"]
            volume.host_path = host_path

        # nfs
        if ("nfs" in volume_data and "path" in volume_data["nfs"]
                and "server" in volume_data["nfs"]):
            volume.nfs = client.V1NFSVolumeSource(
                path=volume_data["nfs"]["path"],
                server=volume_data["nfs"]["server"])

        return volume

    return None
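# Hedged usage sketch (illustrative input): the dict mirrors the YAML layout of
# a pod volume entry; only the keys the function inspects are shown.
nfs_volume = create_volume({
    "name": "shared-data",
    "nfs": {"server": "10.0.0.5", "path": "/exports/data"},
})
pvc_volume = create_volume({
    "name": "workspace",
    "persistentVolumeClaim": {"claimName": "workspace-pvc"},
})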
def add_azure_files(kube_manager, pod_spec, namespace):
    context_hash = pod_spec.containers[0].args[1].split(':')[-1]
    secret_name = (constants.AZURE_STORAGE_CREDS_SECRET_NAME_PREFIX
                   + context_hash.lower())
    if not kube_manager.secret_exists(secret_name, namespace):
        raise Exception("Secret '{}' not found in namespace '{}'".format(
            secret_name, namespace))

    volume_mount = client.V1VolumeMount(name='azure-files',
                                        mount_path='/mnt/azure/',
                                        read_only=True)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
        name='azure-files',
        azure_file=client.V1AzureFileVolumeSource(
            secret_name=secret_name,
            share_name=constants.AZURE_FILES_SHARED_FOLDER))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]