def get_gpu_volumes():
    """Build hostPath volumes exposing the node's NVIDIA directories.

    Returns one ``nvidia-<key>`` V1Volume for each of the bin, lib and
    libcuda directories configured in ``settings.DIRS_NVIDIA``.
    """
    return [
        client.V1Volume(
            name='nvidia-{}'.format(key),
            host_path=client.V1HostPathVolumeSource(
                path=settings.DIRS_NVIDIA.get(key)))
        for key in ('bin', 'lib', 'libcuda')
    ]
def get_gpu_volumes():
    """Build hostPath volumes for the NVIDIA driver directories.

    Reads the directory mapping from configuration (``DIRS_NVIDIA``) and
    exposes each entry (bin, lib, libcuda) as a ``nvidia-<key>`` volume.
    """
    nvidia_dirs = conf.get(DIRS_NVIDIA)
    volumes = []
    for key in ('bin', 'lib', 'libcuda'):
        volumes.append(
            client.V1Volume(
                name='nvidia-{}'.format(key),
                host_path=client.V1HostPathVolumeSource(
                    path=nvidia_dirs.get(key))))
    return volumes
def create_pv(username, namespace, path, storage_size):
    """Build a 'gpfs' PersistentVolume (and its host directory) for a user.

    The username is escaped the same way jupyterhub does so the generated
    resource names line up with jupyterhub's claim names.  Returns a
    ``(pv, path)`` tuple: the V1PersistentVolume object and the per-user
    host path it is bound to.
    """
    allowed = set(string.ascii_lowercase + string.digits)
    # Mirror jupyterhub's username escaping so names match its claims.
    escaped = escapism.escape(username, safe=allowed, escape_char='-').lower()
    user_path = os.path.join(path, escaped)
    spec = client.V1PersistentVolumeSpec(
        access_modes=['ReadWriteOnce'],
        capacity={'storage': storage_size},
        claim_ref=client.V1ObjectReference(
            namespace=namespace, name='claim-{!s}'.format(escaped)),
        host_path=client.V1HostPathVolumeSource(
            path=user_path, type='DirectoryOrCreate'),
        storage_class_name='gpfs',
        persistent_volume_reclaim_policy='Retain',
        volume_mode='Filesystem')
    pv = client.V1PersistentVolume(
        api_version='v1',
        kind='PersistentVolume',
        metadata=client.V1ObjectMeta(
            name='gpfs-{!s}'.format(escaped), namespace=namespace),
        spec=spec)
    return pv, user_path
def create_volume(volume_data):
    """Build a V1Volume from a plain volume description dict.

    Supports ``persistentVolumeClaim``, ``hostPath`` and ``nfs`` sources.
    Returns None when ``volume_data`` has no "name" key.
    """
    if "name" not in volume_data:
        return None
    volume = client.V1Volume(name=volume_data["name"])
    # persistent claim
    # BUG FIX: the original tested `in volume_data.has_key`, a membership
    # test against a method object, which raises at runtime; test the dict.
    if "persistentVolumeClaim" in volume_data:
        volume_pvc = volume_data["persistentVolumeClaim"]
        if "claimName" in volume_pvc:
            volume.persistent_volume_claim = (
                client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_pvc["claimName"]))
    # hostpath
    if "hostPath" in volume_data and "path" in volume_data["hostPath"]:
        host_path = client.V1HostPathVolumeSource(
            path=volume_data["hostPath"]["path"])
        # The redundant outer "hostPath" re-check is dropped; this matches
        # the corrected sibling implementation of create_volume.
        if "type" in volume_data["hostPath"]:
            host_path.type = volume_data["hostPath"]["type"]
        volume.host_path = host_path
    # nfs
    if ("nfs" in volume_data and "path" in volume_data["nfs"]
            and "server" in volume_data["nfs"]):
        volume.nfs = client.V1NFSVolumeSource(
            path=volume_data["nfs"]["path"],
            server=volume_data["nfs"]["server"])
    return volume
def create_storage(self, name, capacity, storageClassName, infrastructure_id, properties):
    """Create a hostpath-backed PersistentVolume; defer to the provisioner otherwise.

    For the 'hostpath' storage class the ``properties`` dict must contain a
    'hostpath' entry naming the node directory to expose; any other class is
    a no-op because the storage provisioner creates the PV itself.

    Raises:
        ValueError: when storageClassName is 'hostpath' but no 'hostpath'
            property was supplied.
    """
    v1 = self.coreV1Api()
    logger.debug('storageClassName=' + storageClassName)
    if storageClassName != 'hostpath':
        # The storage provisioner will create the persistent volume.
        return
    hostpath = properties.get('hostpath', None)
    if hostpath is None:
        raise ValueError("Hostpath property must be provided")
    storage = client.V1PersistentVolume(
        api_version='v1',
        kind='PersistentVolume',
        metadata=client.V1ObjectMeta(
            name=name, labels={"infrastructure_id": infrastructure_id}),
        spec=client.V1PersistentVolumeSpec(
            capacity={'storage': capacity},
            access_modes=['ReadWriteOnce'],
            host_path=client.V1HostPathVolumeSource(path=hostpath, type='')))
    logger.debug("Creating storage %s" % str(storage))
    api_response = v1.create_persistent_volume(storage)
    logger.debug("Storage created. status='%s'" % str(api_response.status))
def create(existing_volumes, create_volumes, namespace_name, labels):
    """Create local 5Gi PersistentVolumes cassandra-data-<i> for each missing index.

    Indexes run from ``existing_volumes`` (inclusive) to ``create_volumes``
    (exclusive); each PV is backed by /tmp/data/cassandra-data-<i>.
    """
    for index in range(existing_volumes, create_volumes):
        pv = client.V1PersistentVolume()
        # Mark these PVs as node-local storage.
        labels['type'] = "local"
        pv.metadata = client.V1ObjectMeta(
            name="cassandra-data-" + str(index),
            namespace=namespace_name,
            labels=labels)
        pv.spec = client.V1PersistentVolumeSpec(
            access_modes=["ReadWriteOnce"],
            capacity={"storage": "5Gi"},
            host_path=client.V1HostPathVolumeSource(
                path="/tmp/data/cassandra-data-" + str(index)),
            persistent_volume_reclaim_policy="Delete")
        v1.create_persistent_volume(body=pv)
    return "persistent volume created"
def add_vol(name, mount_path, host_path):
    """Attach a hostPath volume to the task and mount it at mount_path."""
    volume = k8s_client.V1Volume(
        name=name,
        host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=''))
    mount = k8s_client.V1VolumeMount(mount_path=mount_path, name=name)
    task.add_volume(volume).add_volume_mount(mount)
def create_job(self, name, image, cmd, path):
    """Launch a one-shot batch Job mounting host ``path`` at /root.

    The pod is labeled with this object's user, never restarted, and runs
    ``cmd`` in ``image`` with unbuffered Python output.
    """
    volume_name = name + "-volume"
    container = client.V1Container(
        name=name,
        image=image,
        env=[client.V1EnvVar(name='PYTHONUNBUFFERED', value='0')],
        command=cmd,
        volume_mounts=[
            client.V1VolumeMount(name=volume_name, mount_path="/root")
        ])
    volume = client.V1Volume(
        name=volume_name,
        host_path=client.V1HostPathVolumeSource(path=path))
    pod_spec = client.V1PodSpec(
        restart_policy="Never",
        containers=[container],
        volumes=[volume])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=name, labels={"user": self.user}),
        spec=pod_spec)
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=name),
        spec=client.V1JobSpec(template=template))
    client.BatchV1Api().create_namespaced_job(
        namespace=self.namespace, body=job)
def submit(self):
    """Submit a image spec to openshift's s2i and wait for completion """
    # Bind-mount the node's docker socket so the builder container can talk
    # to the host docker daemon directly.
    volume_mounts = [
        client.V1VolumeMount(mount_path="/var/run/docker.sock",
                             name="docker-socket")
    ]
    volumes = [
        client.V1Volume(name="docker-socket",
                        host_path=client.V1HostPathVolumeSource(
                            path="/var/run/docker.sock"))
    ]
    if self.push_secret:
        # Registry credentials (a pre-existing secret) mounted at
        # /root/.docker so the builder can push the finished image.
        volume_mounts.append(
            client.V1VolumeMount(mount_path="/root/.docker",
                                 name='docker-push-secret'))
        volumes.append(
            client.V1Volume(name='docker-push-secret',
                            secret=client.V1SecretVolumeSource(
                                secret_name=self.push_secret)))
    # Single-container, never-restarted pod running the builder image with
    # the command from get_cmd(); labeled by name so it can be watched below.
    self.pod = client.V1Pod(metadata=client.V1ObjectMeta(
        name=self.name, labels={"name": self.name}),
                            spec=client.V1PodSpec(containers=[
                                client.V1Container(
                                    image=self.builder_image,
                                    name="builder",
                                    args=self.get_cmd(),
                                    image_pull_policy='Always',
                                    volume_mounts=volume_mounts,
                                )
                            ],
                                                  volumes=volumes,
                                                  restart_policy="Never"))
    try:
        ret = self.api.create_namespaced_pod(self.namespace, self.pod)
    except client.rest.ApiException as e:
        if e.status == 409:
            # Someone else created it!
            pass
        else:
            raise
    # Follow the pod's phase changes until it is deleted or reaches a
    # terminal phase; cleanup() runs on both Succeeded and Failed.
    w = watch.Watch()
    try:
        for f in w.stream(self.api.list_namespaced_pod,
                          self.namespace,
                          label_selector="name={}".format(self.name)):
            if f['type'] == 'DELETED':
                self.progress('pod.phasechange', 'Deleted')
                return
            self.pod = f['object']
            self.progress('pod.phasechange', self.pod.status.phase)
            if self.pod.status.phase == 'Succeeded':
                self.cleanup()
            elif self.pod.status.phase == 'Failed':
                self.cleanup()
    finally:
        w.stop()
def time_stat():
    """Run startOp pinned to node 10.0.1.180 with /lib/modules and /dev mounted,
    requesting 4 Cambricon MLU devices."""
    start = startOp()
    # Expose the host's kernel modules inside the container.
    start.add_volume(
        k8s_client.V1Volume(
            name='start',
            host_path=k8s_client.V1HostPathVolumeSource(path="/lib/modules")))
    start.add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/lib/modules', name='start'))
    # Expose the host's /dev so the MLU device nodes are visible.
    start.add_volume(
        k8s_client.V1Volume(
            name='dev',
            host_path=k8s_client.V1HostPathVolumeSource(path="/dev")))
    start.add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/dev', name='dev'))
    start.add_node_selector_constraint('kubernetes.io/hostname', '10.0.1.180')
    start.add_resource_limit("cambricon.com/mlu", "4")
def _get_volumes(self):
    """Translate self.volumes ({name: {"path": ...}}) into hostPath V1Volumes.

    Returns an empty list when self.volumes is unset or empty.
    """
    if not self.volumes or self.volumes == "":
        return []
    return [
        client.V1Volume(
            name=name,
            host_path=client.V1HostPathVolumeSource(path=spec.get("path")))
        for name, spec in self.volumes.items()
    ]
def _mount_hostpath(task):
    """Mount the closure-level host_path into the task at mount_path.

    Uses volume_name / host_path / mount_path from the enclosing scope.
    """
    from kubernetes import client as k8s_client
    source = k8s_client.V1HostPathVolumeSource(path=host_path, type="")
    vol = k8s_client.V1Volume(name=volume_name, host_path=source)
    mount = k8s_client.V1VolumeMount(mount_path=mount_path, name=volume_name)
    return task.add_volume(vol).add_volume_mount(mount)
def get_volume(volume, claim_name=None, volume_mount=None):
    """Build a V1Volume named ``volume``.

    Preference order: PVC (when claim_name is given), hostPath (when
    volume_mount is given), otherwise an emptyDir.
    """
    if claim_name:
        claim = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=claim_name)
        return client.V1Volume(name=volume, persistent_volume_claim=claim)
    if volume_mount:
        source = client.V1HostPathVolumeSource(path=volume_mount)
        return client.V1Volume(name=volume, host_path=source)
    return client.V1Volume(name=volume,
                           empty_dir=client.V1EmptyDirVolumeSource())
def get(self):
    """Return the postgres-pv PersistentVolume definition (2Gi hostPath)."""
    metadata = client.V1ObjectMeta(name='postgres-pv',
                                   labels={'pv': 'postgres'})
    source = client.V1HostPathVolumeSource(path='/data/postgres_storage')
    spec = client.V1PersistentVolumeSpec(
        storage_class_name='manual',
        capacity={'storage': '2Gi'},
        access_modes=['ReadWriteOnce'],
        host_path=source)
    return client.V1PersistentVolume(
        kind='PersistentVolume',
        api_version='v1',
        metadata=metadata,
        spec=spec)
def get_job_volumes(volumes):
    """Map {name: host_dir} into hostPath V1Volumes (DirectoryOrCreate).

    Each host directory is stripped of surrounding whitespace and resolved
    to its real path before use.
    """
    job_volumes = []
    for name, host_dir in volumes.items():
        source = client.V1HostPathVolumeSource(
            path=os.path.realpath(host_dir.strip()),
            type='DirectoryOrCreate')
        job_volumes.append(client.V1Volume(name=name, host_path=source))
    return job_volumes
def define_host_path_volume(self, name, path, data_type=''):
    """Create a V1Volume backed by a path on the node (hostPath).

    Kubernetes neither creates nor manages the path; it only mounts it and
    validates it against the given type.  Host path volumes support neither
    ownership management nor SELinux relabeling.

    @param name ,, str name of volume that is created.
    @param path ,, str path of the directory on the host. If the path is a
        symlink, it will follow the link to the real path.
    @param data_type ,, str Type for HostPath Volume. Defaults to "".
    """
    source = client.V1HostPathVolumeSource(path=path, type=data_type)
    return client.V1Volume(name=name, host_path=source)
def get_volume(volume, claim_name=None, host_path=None, read_only=None):
    """Build a V1Volume: PVC if claim_name, hostPath if host_path, else emptyDir."""
    if claim_name:
        return client.V1Volume(
            name=volume,
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=claim_name, read_only=read_only))
    if host_path:
        source = client.V1HostPathVolumeSource(path=host_path)
        return client.V1Volume(name=volume, host_path=source)
    return client.V1Volume(name=volume,
                           empty_dir=client.V1EmptyDirVolumeSource())
def time_stat():
    """Wire up the multi-arch pipeline: start -> {amd, arm, camb} chains -> end.

    Every step shares the node's /root via a hostPath volume; the Cambricon
    step additionally mounts /home/dl-plateform and requests one MLU.
    """

    def _with_root(op, vol_name):
        # Shared helper: hostPath-mount the node's /root into the op.
        return op.add_volume(
            k8s_client.V1Volume(
                name=vol_name,
                host_path=k8s_client.V1HostPathVolumeSource(
                    path="/root"))).add_volume_mount(
                        k8s_client.V1VolumeMount(mount_path='/root',
                                                 name=vol_name))

    start = _with_root(
        startOp().add_node_selector_constraint('beta.kubernetes.io/arch',
                                               'amd64'), 'start')
    amd = _with_root(
        amdOp(start.output).add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'amd64'), 'amd1')
    arm = _with_root(
        armOp(start.output).add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'arm64'), 'arm1')
    camb = cambriconOp(start.output).add_volume(
        k8s_client.V1Volume(
            name='cambricon-mlu',
            host_path=k8s_client.V1HostPathVolumeSource(
                path="/home/dl-plateform"))).add_volume_mount(
                    k8s_client.V1VolumeMount(mount_path='/home/dl-plateform',
                                             name='cambricon-mlu'))
    camb.add_resource_limit("cambricon.com/mlu", "1")
    camb.add_node_selector_constraint('beta.kubernetes.io/arch', 'arm64')
    amd1 = _with_root(
        amd1Op(amd.output).add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'amd64'), 'amd2')
    arm1 = _with_root(
        arm1Op(arm.output).add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'arm64'), 'arm2')
    arm_camb = _with_root(
        armOp_camb(camb.output).add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'arm64'), 'camb2')
    end = _with_root(
        endOp(amd1.output, arm1.output,
              arm_camb.output).add_node_selector_constraint(
                  'beta.kubernetes.io/arch', 'amd64'), 'end')
def demo():
    '''
    data_path = "/home/newnfs/hyperai_data/Foundation/AID",
    user_name = "admin",data_job_id = "20191203-1951-data",model_job_id = "20191203-1951-model",var_job_id = "20191203-1951-var",lr = 0.0010000000475,epoch = 2520,batch_size = 8
    :return:
    '''
    detection_data_path = "/root/aircraft"
    user_name = "admin"
    detection_job_id = "20200514-detection"

    def _mount(op, vol_name, node_path, container_path):
        # BUG FIX: the `host_path` field takes V1HostPathVolumeSource; the
        # original passed V1LocalVolumeSource, which only serialized
        # usefully by accident of sharing a `path` attribute.
        return op.add_volume(
            k8s_client.V1Volume(
                name=vol_name,
                host_path=k8s_client.V1HostPathVolumeSource(
                    path=node_path))).add_volume_mount(
                        k8s_client.V1VolumeMount(mount_path=container_path,
                                                 name=vol_name))

    data = _mount(
        ReadyData(detection_data_path, detection_job_id, user_name),
        'nfs-storage', "/home/newnfs",
        "/home/newnfs").add_node_selector_constraint(
            'beta.kubernetes.io/arch', 'amd64')
    camb = CambARMExecute(data.output, detection_job_id, user_name)
    camb = _mount(camb, 'nfs-storage', "/home/newnfs", "/home/newnfs")
    camb = _mount(camb, 'aaa', "/sys/kernel/debug", "/sys/kernel/debug")
    camb = _mount(camb, 'bbb', "/tmp/.X11-unix", "/tmp/.X11-unix")
    camb = _mount(camb, 'ccc', "/mnt/xfs/project/camb/v8.2_arm",
                  "/home/Cambricon-Test-v8.2_arm")
    camb = _mount(
        camb, 'ddd',
        "/mnt/xfs/project/camb/arm_v8.0/v8.0_arm/ARM64-v8.0/arm64/congcan",
        "/home/congcan")
    camb = _mount(camb, 'eee',
                  "/mnt/xfs/project/camb/v8.0/Cambricon-MLU100/datasets",
                  "/home/datasets")
    camb = _mount(camb, 'fff',
                  "/mnt/xfs/project/camb/v8.0/Cambricon-MLU100/models",
                  "/home/models")
    camb.add_node_selector_constraint('beta.kubernetes.io/arch', 'arm64')
    camb.add_resource_limit("cambricon.com/mlu", "1")
    # Mount the node's Cambricon MLU device driver into the container.
    device_name = "dev-cambricon"
    _mount(camb, device_name, "/dev/cambricon_c10Dev0",
           "/dev/cambricon_c10Dev0").add_node_selector_constraint(
               'beta.kubernetes.io/arch', 'arm64')
def export_deployment(self):
    """Build the extensions/v1beta1 Deployment object for this service.

    The pod runs a single container with a fixed UTF-8 locale, the host's
    /opt/logs mounted, and a TCP liveness probe on the first exposed port.
    """
    probe = client.V1Probe(
        initial_delay_seconds=5,
        tcp_socket=client.V1TCPSocketAction(
            port=int(self.container_port[0])))
    container = client.V1Container(
        name=self.dm_name,
        image=self.image,
        ports=[
            client.V1ContainerPort(container_port=int(p))
            for p in self.container_port
        ],
        image_pull_policy='Always',
        env=[
            client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
            client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
        ],
        resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                requests=self.re_requests),
        volume_mounts=[
            client.V1VolumeMount(mount_path='/opt/logs', name='logs')
        ],
        liveness_probe=probe)
    # Pod spec: pull with the shared registry secret, expose host /opt/logs.
    log_volume = client.V1Volume(
        name='logs',
        host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
        spec=client.V1PodSpec(
            containers=[container],
            image_pull_secrets=[
                client.V1LocalObjectReference('registrysecret')
            ],
            volumes=[log_volume]))
    deploy_spec = client.ExtensionsV1beta1DeploymentSpec(
        replicas=int(self.replicas),
        template=pod_template,
        selector=client.V1LabelSelector(
            match_labels={"project": self.dm_name}),
        min_ready_seconds=3)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=self.dm_name),
        spec=deploy_spec)
def persistent_volume(self, name, storage, accessModes=["ReadWriteOnce"], host_path=True, patch=False):
    """Create persistent volume by default on host.

    The storage class is named after the volume; when host_path is true the
    PV is backed by /mnt/data/<name>.  A 409 conflict is patched in place
    when ``patch`` is set, otherwise logged as an error.
    """
    spec = client.V1PersistentVolumeSpec(capacity={"storage": storage},
                                         access_modes=accessModes,
                                         storage_class_name=name)
    if host_path:
        spec.host_path = client.V1HostPathVolumeSource(
            path=f'/mnt/data/{name}')
    ps_vol = client.V1PersistentVolume(kind="PersistentVolume",
                                       api_version="v1")
    ps_vol.metadata = client.V1ObjectMeta(name=name)
    ps_vol.spec = spec
    try:
        api_core.create_persistent_volume(body=ps_vol)
        LOG.info(f'Persistent Volume: {name} created.')
    except ApiException as e:
        # 409 means the PV already exists; optionally patch it in place.
        if e.status == 409 and patch:
            api_core.patch_persistent_volume(name=name, body=ps_vol)
            LOG.info(f'PeVolume: {name} patched.')
        else:
            LOG.error(f'Exception message: {e}')
def get_volumes(self):
    """Expose the project's src/ directory at /src (for live reload).

    Returns a (volumes, volume_mounts) pair sharing the volume name
    'reload'.
    """
    # Three dirname() hops from this file up to the project root.
    project_root = os.path.abspath(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    src_volume = client.V1Volume(
        name='reload',
        host_path=client.V1HostPathVolumeSource(
            path=os.path.join(project_root, 'src'),
            type='Directory'))
    src_mount = client.V1VolumeMount(name='reload', mount_path='/src')
    return [src_volume], [src_mount]
def _get_volumes(self, volumes):
    """Translate a {name: {"type": ..., "path": ...}} mapping into V1Volumes.

    Supports PVC-backed volumes (claim name equals the volume name) and
    hostPath volumes; entries of any other type are skipped.

    Returns:
        list: the V1Volume objects ([] when ``volumes`` is None or empty).
    """
    volume_list = []
    if volumes != None and volumes != "":
        for name, item in volumes.items():
            if item.get("type") == VOLUME_TYPE_PVC:
                volume_list.append(client.V1Volume(
                    name=name,
                    persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                        claim_name=name
                    )
                ))
            elif item.get("type") == VOLUME_TYPE_HOST_PATH:
                volume_list.append(client.V1Volume(
                    name=name,
                    host_path=client.V1HostPathVolumeSource(
                        path=item.get("path")
                    )
                ))
    # BUG FIX: the original built volume_list but fell off the end of the
    # function, implicitly returning None to every caller.
    return volume_list
def _parse_volumes(volume_list):
    """Convert docker-style volume dicts into (volumes, volume_mounts) lists.

    Each entry gets a random UUID as its volume name; a container 'mode' of
    'ro' makes the mount read-only.
    """
    volumes = []
    volume_mounts = []
    for entry in volume_list:
        name = str(uuid.uuid4())
        read_only = entry['container'].get('mode') == 'ro'
        volumes.append(
            client.V1Volume(
                name=name,
                host_path=client.V1HostPathVolumeSource(
                    path=entry['host']['path'])))
        volume_mounts.append(
            client.V1VolumeMount(
                name=name,
                mount_path=entry['container']['bind'],
                read_only=read_only))
    return volumes, volume_mounts
def _generate_pod_template(self, *args, **kwargs):
    """Assemble a V1PodTemplateSpec from keyword arguments.

    Recognized kwargs: name, containers, initial_containers, volumes,
    labels, restart_policy.  The deploy name is always added under the
    'app' label.
    """
    deploy_name = kwargs.get("name")
    labels = kwargs.get("labels", {})
    labels.update({"app": deploy_name})
    volumes = []
    for volume_json in kwargs.get("volumes", []):
        sources = {}
        host_path = volume_json.get("host_path", None)
        if host_path:
            sources["host_path"] = client.V1HostPathVolumeSource(
                path=host_path)
        empty_dir = volume_json.get("empty_dir", None)
        if empty_dir:
            sources["empty_dir"] = client.V1EmptyDirVolumeSource(**empty_dir)
        pvc_name = volume_json.get("pvc", None)
        if pvc_name:
            sources["persistent_volume_claim"] = (
                client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=pvc_name))
        volumes.append(
            client.V1Volume(name=volume_json.get("name"), **sources))
    pod_spec = client.V1PodSpec(
        init_containers=self._generate_container_pods(
            kwargs.get("initial_containers", [])),
        containers=self._generate_container_pods(
            kwargs.get("containers", [])),
        volumes=volumes,
        restart_policy=kwargs.get("restart_policy", "Always"),
    )
    template_spec = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels), spec=pod_spec)
    LOG.info("template spec %s", template_spec)
    return template_spec
def demo_op(name: str, metadata=markdown_metadata, is_exit_handler=False) -> ContainerOp:
    """Build a demo step that echoes its metadata into METADATA_FILE_PATH,
    with /data/out from the host mounted at OUT_DIR."""
    op = ContainerOp(name=name,
                     image=BASE_IMAGE,
                     command=['sh', '-c'],
                     arguments=[
                         'echo "Running step $0" && echo "$1" > $2',
                         name,
                         metadata(name),
                         METADATA_FILE_PATH,
                     ],
                     is_exit_handler=is_exit_handler,
                     output_artifact_paths=default_artifact_path())
    out_volume = k8s.V1Volume(
        name='volume',
        host_path=k8s.V1HostPathVolumeSource(path='/data/out'))
    op.add_volume(out_volume).add_volume_mount(
        k8s.V1VolumeMount(name='volume', mount_path=OUT_DIR))
    return op
def create_volume(volume_data):
    """Translate a volume description dict into a V1Volume.

    Handles persistentVolumeClaim, hostPath, nfs, secret and configMap
    sources; returns None when no "name" key is present.
    """
    if "name" not in volume_data:
        return None
    volume = client.V1Volume(name=volume_data["name"])
    # persistent claim
    pvc_data = volume_data.get("persistentVolumeClaim", {})
    if "claimName" in pvc_data:
        volume.persistent_volume_claim = (
            client.V1PersistentVolumeClaimVolumeSource(
                claim_name=pvc_data["claimName"]))
    # hostpath
    host_data = volume_data.get("hostPath", {})
    if "path" in host_data:
        source = client.V1HostPathVolumeSource(path=host_data["path"])
        if "type" in host_data:
            source.type = host_data["type"]
        volume.host_path = source
    # nfs
    nfs_data = volume_data.get("nfs", {})
    if "path" in nfs_data and "server" in nfs_data:
        volume.nfs = client.V1NFSVolumeSource(
            path=nfs_data["path"], server=nfs_data["server"])
    # secret
    if "secret" in volume_data:
        volume.secret = client.V1SecretVolumeSource(
            secret_name=volume_data["secret"]["secretName"])
    # configMap
    if "configMap" in volume_data:
        volume.config_map = client.V1ConfigMapVolumeSource(
            name=volume_data["configMap"]["name"])
    return volume
def create_deployment_object(url, url_id):
    """Build a single-replica Deployment running the cloudcam container for url.

    The pod mounts the host's /DATA directory and receives URL / URL_ID via
    environment variables.
    """
    app_label = "{}-container".format(url_id)
    container = client.V1Container(
        name=app_label,
        image="cloudcam_main:1.0",
        env=[
            client.V1EnvVar(name="URL", value=url),
            client.V1EnvVar(name="URL_ID", value=url_id)
        ],
        volume_mounts=[client.V1VolumeMount(name="aaaa", mount_path="/DATA")])
    data_volume = client.V1Volume(
        name="aaaa",
        host_path=client.V1HostPathVolumeSource(path="/DATA"))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": app_label}),
        spec=client.V1PodSpec(containers=[container],
                              volumes=[data_volume]))
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template,
        selector={'matchLabels': {"app": app_label}})
    return client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name="{}-main".format(url_id)),
        spec=spec)
def resnet_pipeline(raw_data_dir='/mnt/workspace/raw_data',
                    processed_data_dir='/mnt/workspace/processed_data',
                    model_dir='/mnt/workspace/saved_model',
                    epochs=50,
                    trtserver_name='trtis',
                    model_name='resnet_graphdef',
                    model_version=1,
                    webapp_prefix='webapp',
                    webapp_port=80):
    """preprocess -> train -> deploy TRT inference server -> deploy web app.

    Every step shares the 'nvidia-workspace' hostPath volume mounted at
    /mnt/workspace.
    """
    volume_name = 'nvidia-workspace'
    volume_path = '/mnt/workspace'
    op_dict = {}
    op_dict['preprocess'] = PreprocessOp('preprocess', raw_data_dir,
                                         processed_data_dir)
    op_dict['train'] = TrainOp('train', op_dict['preprocess'].output,
                               model_dir, model_name, model_version, epochs)
    # Never reuse a cached result for the training step.
    op_dict['train'].execution_options.caching_strategy.max_cache_staleness = "P0D"
    op_dict['deploy_inference_server'] = InferenceServerLauncherOp(
        'deploy_inference_server', op_dict['train'].output, trtserver_name)
    op_dict['deploy_webapp'] = WebappLauncherOp(
        'deploy_webapp', op_dict['deploy_inference_server'].output,
        model_name, model_version, webapp_prefix, webapp_port)
    for op in op_dict.values():
        op.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                host_path=k8s_client.V1HostPathVolumeSource(
                    path=volume_path)))
        op.add_volume_mount(
            k8s_client.V1VolumeMount(mount_path=volume_path,
                                     name=volume_name))
def parse_volume_and_mount(volume_conf, pod_name):
    """Get k8s volumes list and volume mounts list from the volume
    config string.

    Args:
        volume_conf (string): the volumes config string,
            e.g. "host_path=c0,mount_path=/path0;claim_name=c1,mount_path=/path1".
        pod_name (string): the pod name

    Return:
        volumes (List): a Python list contains k8s volumes.
        volume_mounts (List): a Python list contains k8s volume mounts.

    Raises:
        ValueError: if a volume entry has neither "claim_name" nor
            "host_path".
    """
    volumes = []
    volume_mounts = []
    volume_dicts = parse(volume_conf)
    for i, volume_dict in enumerate(volume_dicts):
        volume_name = pod_name + "-volume-%d" % i
        if "claim_name" in volume_dict:
            pvc_volume_source = client.V1PersistentVolumeClaimVolumeSource(
                claim_name=volume_dict["claim_name"], read_only=False)
            volume = client.V1Volume(name=volume_name,
                                     persistent_volume_claim=pvc_volume_source)
        elif "host_path" in volume_dict:
            volume = client.V1Volume(
                name=volume_name,
                host_path=client.V1HostPathVolumeSource(
                    path=volume_dict["host_path"],
                    type=volume_dict.get("type", None),
                ),
            )
        else:
            # BUG FIX: the original fell through with `volume` unbound (or
            # stale from the previous iteration) and still appended a mount.
            raise ValueError(
                "volume config needs 'claim_name' or 'host_path': %r"
                % volume_dict)
        volumes.append(volume)
        volume_mounts.append(
            client.V1VolumeMount(name=volume_name,
                                 mount_path=volume_dict["mount_path"]))
    return volumes, volume_mounts