def create_job_object(message, environment_image):
    """Function to create the AWS EKS Job object

    Arguments:
        message {[dict]} -- Submission message from AWS SQS queue
        environment_image {[str]} -- Docker image URI for the environment container

    Returns:
        [AWS EKS Job class object] -- AWS EKS Job class object
    """
    PYTHONUNBUFFERED_ENV = client.V1EnvVar(name="PYTHONUNBUFFERED", value="1")
    AUTH_TOKEN_ENV = client.V1EnvVar(name="AUTH_TOKEN", value=AUTH_TOKEN)
    EVALAI_API_SERVER_ENV = client.V1EnvVar(
        name="EVALAI_API_SERVER", value=EVALAI_API_SERVER
    )
    MESSAGE_BODY_ENV = client.V1EnvVar(name="BODY", value=str(message))
    submission_pk = message["submission_pk"]
    image = message["submitted_image_uri"]
    # Configure Pod agent container
    agent_container = client.V1Container(
        name="agent", image=image, env=[PYTHONUNBUFFERED_ENV]
    )
    # Configure Pod environment container
    environment_container = client.V1Container(
        name="environment",
        image=environment_image,
        env=[
            PYTHONUNBUFFERED_ENV,
            AUTH_TOKEN_ENV,
            EVALAI_API_SERVER_ENV,
            MESSAGE_BODY_ENV,
        ],
    )
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "evaluation"}),
        spec=client.V1PodSpec(
            containers=[environment_container, agent_container],
            restart_policy="Never",
        ),
    )
    # Create the specification of the job
    spec = client.V1JobSpec(backoff_limit=1, template=template)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(
            name="submission-{0}".format(submission_pk)
        ),
        spec=spec,
    )
    return job

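# Usage sketch (not part of the original source): submitting the job built
# above with the official Kubernetes Python client. The message shape, the
# image URIs, and the "default" namespace are assumptions for illustration;
# AUTH_TOKEN and EVALAI_API_SERVER must already be defined at module level.
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a pod
batch_v1 = client.BatchV1Api()
message = {"submission_pk": 42, "submitted_image_uri": "example.com/agent:latest"}
job = create_job_object(message, "example.com/environment:latest")
response = batch_v1.create_namespaced_job(body=job, namespace="default")
print("Job created. status='%s'" % str(response.status))
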
def configure_workflow_job(
        namespace: str,
        project_name: str,
        project_repo_url: str,
        project_repo_branch: str = 'master',
        retries: int = 2,
        image: str = BODYWORK_DOCKER_IMAGE) -> k8s.V1Job:
    """Configure a Bodywork workflow execution job.

    :param namespace: The namespace to deploy the job to.
    :param project_name: The name of the Bodywork project that the stage
        belongs to.
    :param project_repo_url: The URL for the Bodywork project Git repository.
    :param project_repo_branch: The Bodywork project Git repository branch
        to use, defaults to 'master'.
    :param retries: Number of times to retry running the stage to completion
        (if necessary), defaults to 2.
    :param image: Docker image to use for running the stage within, defaults
        to BODYWORK_DOCKER_IMAGE.
    :return: A configured k8s job object.
    """
    vcs_env_vars = [
        k8s.V1EnvVar(
            name=SSH_GITHUB_KEY_ENV_VAR,
            value_from=k8s.V1EnvVarSource(
                secret_key_ref=k8s.V1SecretKeySelector(
                    key=SSH_GITHUB_KEY_ENV_VAR,
                    name=SSH_GITHUB_SECRET_NAME,
                    optional=True)))
    ]
    container = k8s.V1Container(
        name='bodywork',
        image=image,
        image_pull_policy='Always',
        env=vcs_env_vars,
        command=['bodywork', 'workflow'],
        args=[f'--namespace={namespace}', project_repo_url, project_repo_branch])
    pod_spec = k8s.V1PodSpec(
        service_account_name=BODYWORK_WORKFLOW_SERVICE_ACCOUNT,
        containers=[container],
        restart_policy='Never')
    pod_template_spec = k8s.V1PodTemplateSpec(spec=pod_spec)
    job_spec = k8s.V1JobSpec(
        template=pod_template_spec,
        completions=1,
        backoff_limit=retries,
        ttl_seconds_after_finished=BODYWORK_WORKFLOW_JOB_TIME_TO_LIVE)
    job = k8s.V1Job(
        metadata=k8s.V1ObjectMeta(
            name=project_name,
            namespace=namespace,
            labels={'app': 'bodywork'}),
        spec=job_spec)
    return job

def create_job_object(job_type, fib_n):
    # Configure env variables
    envs = [
        client.V1EnvVar(name='JOB_TYPE', value=job_type),
        client.V1EnvVar(name='FIB_N', value=fib_n)
    ]
    # Configure VolumeMounts
    volume_mount = client.V1VolumeMount(mount_path='/mnt/storage', name='storage')
    # Configure resource requests and limits
    resources = client.V1ResourceRequirements(
        requests={'memory': '64Mi', 'cpu': '250m'},
        limits={'memory': '128Mi', 'cpu': '500m'})
    # Configure Pod template container
    container = client.V1Container(
        name=CONTAINER_NAME,
        image=CONTAINER_IMAGE,
        env=envs,
        volume_mounts=[volume_mount],
        resources=resources)
    # Configure Volume template
    if job_type == JOB_STATEFUL:
        volume = client.V1Volume(
            name='storage', host_path={'path': '/c/minikube-pv'})
    else:
        volume = client.V1Volume(name='storage', empty_dir={})
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "job-app"}),
        spec=client.V1PodSpec(
            restart_policy="Never", containers=[container], volumes=[volume]))
    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=1)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(generate_name=JOB_NAME),
        spec=spec)
    return job

def create_job_object(self, config):
    pipeline_name = config[keys.GlobalKeys.PIPELINE][
        keys.PipelineKeys.ARGS][keys.PipelineDetailKeys.NAME]
    job_name = to_dns1123(f'{self.job_prefix}{pipeline_name}', length=63)

    labels = self.extra_labels or {}
    job_labels = {
        "app": "zenml",
        "pipeline": pipeline_name,
        "datasource-id":
            config[keys.GlobalKeys.PIPELINE][keys.PipelineKeys.DATASOURCE][
                keys.DatasourceKeys.ID],
        "pipeline-id": get_id(pipeline_name),
    }
    labels.update(job_labels)  # make sure our labels are present

    # kubernetes needs the config as a string
    config_encoded = base64.b64encode(
        json.dumps(config).encode()).decode('utf-8')
    command = [
        'python', '-m', K8S_ENTRYPOINT, 'run_pipeline',
        '--config_b64', config_encoded
    ]

    container = k8s_client.V1Container(
        name=job_name,
        image=self.image,
        command=command,
        image_pull_policy=self.image_pull_policy)

    # Create and configure a spec section
    template = k8s_client.V1PodTemplateSpec(
        metadata=k8s_client.V1ObjectMeta(labels=labels),
        spec=k8s_client.V1PodSpec(
            restart_policy="Never", containers=[container]))

    # Create the specification of the job
    spec = k8s_client.V1JobSpec(template=template, backoff_limit=1)

    # Instantiate the job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(
            annotations=self.extra_annotations,
            labels=labels,
            name=job_name,
            namespace=self.namespace),
        spec=spec)

    return job

def fake_v1_job_error():
    return client.V1Job(
        api_version='batch/v1',
        kind='Job',
        metadata=client.V1ObjectMeta(
            name='curry-test001',
            namespace='curryns'),
        spec=client.V1JobSpec(
            completions=1,
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    name='curry-test001',
                    namespace='curryns'),
                spec=client.V1PodSpec(
                    hostname='job',
                    containers=['image']))),
        status=client.V1JobStatus(succeeded=2))

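# Illustrative test sketch (an assumption, not from the original source):
# the fixture above models an inconsistent Job whose reported `succeeded`
# count (2) disagrees with its requested `completions` (1), which is the
# kind of mismatch error-handling code would be expected to catch.
def test_fake_job_reports_inconsistent_status():
    job = fake_v1_job_error()
    assert job.status.succeeded != job.spec.completions
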
def create_job(namespace, name, template):
    # Create the specification of the job
    spec = client.V1JobSpec(template=template)
    # Instantiate the job object
    job = client.V1Job(metadata=client.V1ObjectMeta(name=name), spec=spec)

    api_instance = client.BatchV1Api(client.ApiClient())
    api_response = api_instance.create_namespaced_job(body=job, namespace=namespace)
    print("Job created. status='%s'" % str(api_response.status))

def generate_job(self, pod_template_spec):
    """Generate a V1Job initialized with the correct completions and
    parallelism (for HP search) and with the provided V1PodTemplateSpec."""
    if not isinstance(pod_template_spec, k8s_client.V1PodTemplateSpec):
        raise TypeError(
            "pod_template_spec must be a V1PodTemplateSpec, but got %s"
            % type(pod_template_spec))

    job_spec = k8s_client.V1JobSpec(
        template=pod_template_spec,
        parallelism=self.runs,
        completions=self.runs)

    return k8s_client.V1Job(
        metadata=k8s_client.V1ObjectMeta(name=self.name),
        spec=job_spec)

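# Sketch (an assumption, not from the original source): waiting for a job
# created by generate_job to finish. With completions == parallelism == runs,
# the Job is done once `status.succeeded` reaches that count. Assumes kube
# config has already been loaded.
import time
from kubernetes import client as k8s_client

def wait_for_job(name, namespace="default", runs=1, poll_seconds=5):
    batch_v1 = k8s_client.BatchV1Api()
    while True:
        status = batch_v1.read_namespaced_job(name, namespace).status
        if (status.succeeded or 0) >= runs:
            return True
        if status.failed:  # pods failed up to the backoff limit
            return False
        time.sleep(poll_seconds)
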
def make_job_object(
    name: str,
    container_image: str,
    command: List[str],
    namespace: str = 'default',
    container_name: str = 'jobcontainer',
    pod_labels: Dict[str, str] = None,
    service_account_name: str = 'default',
) -> k8s_client.V1Job:
    """Make a Kubernetes Job object with a single pod.

    See
    https://kubernetes.io/docs/concepts/workloads/controllers/job/#writing-a-job-spec

    Args:
      name: Name of job.
      container_image: Name of container image.
      command: Command to run.
      namespace: Kubernetes namespace to contain this Job.
      container_name: Name of the container.
      pod_labels: Dictionary of metadata labels for the pod.
      service_account_name: Name of the service account for this Job.

    Returns:
      `kubernetes.client.V1Job` object.
    """
    pod_labels = pod_labels or {}
    return k8s_client.V1Job(
        api_version='batch/v1',
        kind='Job',
        metadata=k8s_client.V1ObjectMeta(
            namespace=namespace,
            name=sanitize_pod_name(name),
        ),
        status=k8s_client.V1JobStatus(),
        spec=k8s_client.V1JobSpec(
            template=k8s_client.V1PodTemplateSpec(
                metadata=k8s_client.V1ObjectMeta(labels=pod_labels),
                spec=k8s_client.V1PodSpec(
                    containers=[
                        k8s_client.V1Container(
                            name=container_name,
                            image=container_image,
                            command=command,
                        ),
                    ],
                    service_account_name=service_account_name,
                    restart_policy=RestartPolicy.NEVER.value,
                ),
            )),
    )

def scheduleJobs():
    jobNames = []
    for jobParameters in request.get_json(force=True):
        if not validateJobParameters(jobParameters):
            return abort(422, 'Invalid arguments')

        body = kubeClient.V1Job(api_version="batch/v1", kind="Job")
        # Body needs Metadata
        # Attention: each Job must have a different name!
        jobName = "r-job-" + str(uuid.uuid4())
        body.metadata = kubeClient.V1ObjectMeta(namespace="default", name=jobName)
        # And a Status
        body.status = kubeClient.V1JobStatus()
        # Now we start with the Template...
        template = kubeClient.V1PodTemplate()
        template.template = kubeClient.V1PodTemplateSpec()

        # Passing arguments in env:
        env_list = createJobEnv(jobParameters, jobName)

        volume_mounts = kubeClient.V1VolumeMount(
            mount_path="/mydata", name="dose-volume")
        container = kubeClient.V1Container(
            name="r-container",
            image="monikeu/r-script-1:r-image-env",
            env=env_list,
            volume_mounts=[volume_mounts],
            image_pull_policy="Always")
        per_vol_claim = kubeClient.V1PersistentVolumeClaimVolumeSource(
            claim_name="dose-volume-claim")
        volume = kubeClient.V1Volume(
            name="dose-volume", persistent_volume_claim=per_vol_claim)
        template.template.spec = kubeClient.V1PodSpec(
            containers=[container], restart_policy='Never', volumes=[volume])
        # And finally we can create our V1JobSpec!
        body.spec = kubeClient.V1JobSpec(
            ttl_seconds_after_finished=600, template=template.template)

        try:
            response = api_instance.create_namespaced_job("default", body, pretty=True)
            pprint(response)
            jobNames.append(jobName)
        except ApiException as e:
            return "Error occurred during an attempt to create a job", e.status

    return 'Created one or more jobs: {}'.format(",".join(jobNames)), 201

def make_job(item):
    response = requests.get("http://localhost:8000/items/{}".format(item))
    obj = json.loads(response.text)

    # Build the object tree bottom-up: V1JobSpec requires a template, the
    # template must be a V1PodTemplateSpec (not V1PodTemplate), and its
    # `spec` must be a V1PodSpec (not V1PodTemplateSpec).
    pod_spec = client.V1PodSpec(
        restart_policy="Never",
        containers=[make_container(item, obj)])
    template = client.V1PodTemplateSpec(spec=pod_spec)

    job = client.V1Job()
    job.metadata = client.V1ObjectMeta()
    job.metadata.name = item
    job.spec = client.V1JobSpec(template=template)
    return job

def _create_job_spec(self, name, command=None, image=None, env_vars=None):
    """Instantiate a Kubernetes job.

    :param name: Name of the job.
    :param command: List of commands to run on the given job.
    :param image: Docker image to use to run the job on.
    :param env_vars: List of environment variables (dictionaries) to
        inject into the workflow engine container.
    """
    image = image or self._workflow_engine_image()
    command = command or self._workflow_engine_command()
    env_vars = env_vars or self._workflow_engine_env_vars()
    if isinstance(command, str):
        command = [command]
    elif not isinstance(command, list):
        raise ValueError(
            'Command should be a list or a string and not {}'.format(
                type(command)))

    workflow_metadata = client.V1ObjectMeta(name=name)
    job = client.V1Job()
    job.api_version = 'batch/v1'
    job.kind = 'Job'
    job.metadata = workflow_metadata
    spec = client.V1JobSpec(template=client.V1PodTemplateSpec())
    spec.template.metadata = workflow_metadata
    container = client.V1Container(
        name=name,
        image=image,
        image_pull_policy='IfNotPresent',
        env=[],
        volume_mounts=[],
        command=['/bin/bash', '-c'],
        args=command)
    container.env.extend(env_vars)
    container.volume_mounts = [
        {
            'name': 'default-shared-volume',
            'mountPath': SHARED_FS_MAPPING['MOUNT_DEST_PATH'],
        },
    ]
    spec.template.spec = client.V1PodSpec(containers=[container])
    spec.template.spec.volumes = [
        KubernetesWorkflowRunManager.k8s_shared_volume[REANA_STORAGE_BACKEND]
    ]
    job.spec = spec
    job.spec.template.spec.restart_policy = 'Never'
    job.spec.ttl_seconds_after_finished = TTL_SECONDS_AFTER_FINISHED
    job.spec.backoff_limit = 0
    return job

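# Hypothetical sketch (not from the original source) of the class-level
# shared-volume mapping referenced above; the real REANA definition may
# differ. Keys are storage backends, values are pod volume definitions in
# plain-dict form, which the client serializes like the typed models.
k8s_shared_volume = {
    'local': {
        'name': 'default-shared-volume',
        'hostPath': {'path': '/var/reana'},
    },
    'cephfs': {
        'name': 'default-shared-volume',
        'persistentVolumeClaim': {'claimName': 'reana-shared-pvc'},
    },
}
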
def build_job(name, containers, volumes):
    # Pod spec
    pod_spec = client.V1PodSpec(
        containers=containers, restart_policy="Never", volumes=volumes)
    pod_template_spec = client.V1PodTemplateSpec(spec=pod_spec)

    # Job spec using the pod template spec
    job_spec = client.V1JobSpec(template=pod_template_spec)
    job_meta = client.V1ObjectMeta(
        name=name, namespace="default", labels={"app": "model-training"})
    job_body = client.V1Job(metadata=job_meta, spec=job_spec)
    return job_body

def get_obj(self):
    """
    :description: Generate job spec.
    """
    return client.V1Job(
        api_version=self.api_version,
        kind=self.kind,
        metadata=client.V1ObjectMeta(
            name=self.slug,
            labels=self.labels,
            annotations=self.annotations),
        spec=client.V1JobSpec(
            template=self.pod_template.get_obj(),
            backoff_limit=self.backoff_limit,
            ttl_seconds_after_finished=10),
    )

def configure(self, owner, title, tag, job_id, callback_url, route_name):
    job_id = str(job_id)
    config = self.model_config
    safeowner = clean(owner)
    safetitle = clean(title)
    name = f"{safeowner}-{safetitle}"
    container = kclient.V1Container(
        name=job_id,
        image=f"{self.cr}/{self.project}/{safeowner}_{safetitle}_tasks:{tag}",
        command=[
            "cs-jobs",
            "--callback-url",
            callback_url,
            "--route-name",
            route_name,
        ],
        env=self.env(owner, title, config),
        resources=kclient.V1ResourceRequirements(**config["resources"]),
    )
    # Create and configure a spec section
    template = kclient.V1PodTemplateSpec(
        metadata=kclient.V1ObjectMeta(
            labels={"app": f"{name}-job", "job-id": job_id}),
        spec=kclient.V1PodSpec(
            restart_policy="Never",
            containers=[container],
            node_selector={"component": "model"},
        ),
    )
    # Create the specification of the job
    spec = kclient.V1JobSpec(
        template=template, backoff_limit=1, ttl_seconds_after_finished=0)
    # Instantiate the job object
    job = kclient.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=kclient.V1ObjectMeta(name=job_id),
        spec=spec,
    )
    if not self.quiet:
        print(yaml.dump(job.to_dict()))
    return job

def create_table_job(table_path, tablejobimageid, kubeconfigpath, dbid,
                     namespace, dbtype, tableid, Region, archeplaydatapath):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume2 = client.V1Volume(
            name="kubeconfig", host_path={"path": kubeconfigpath})
        volume3 = client.V1Volume(
            name="archeplaydata", host_path={"path": archeplaydatapath})
        mount2 = client.V1VolumeMount(
            name="kubeconfig", mount_path="/home/app/web/kubeconfig")
        mount3 = client.V1VolumeMount(
            name="archeplaydata", mount_path="/home/app/web/archeplay/data")
        container = client.V1Container(
            name="tablejob" + tableid,
            image=tablejobimageid,
            volume_mounts=[mount2, mount3],
            command=["python", "-u", "app.py", table_path, dbid, tableid, Region],
            env=[{
                "name": "archeplaydatapath",
                "value": "/home/app/web/archeplay/data"
            }],
            image_pull_policy="Always")
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"tablejob": "tablejob" + tableid}),
            spec=client.V1PodSpec(
                restart_policy="Never",
                containers=[container],
                volumes=[volume2, volume3]))
        # Create the specification of the job
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        # Instantiate the job object
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="tablejob" + tableid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(
            body=job, namespace=namespace)
        success_message = tableid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = tableid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))

def create_resource_job(resource_path, resourcejobimageid, kubeconfigpath,
                        resourceid, state_store, code_type, serviceid,
                        versionid, versionname, namespace):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume1 = client.V1Volume(
            name="buildjob" + resourceid, host_path={"path": "/var/run"})
        volume2 = client.V1Volume(
            name="kubeconfig", host_path={"path": kubeconfigpath})
        mount1 = client.V1VolumeMount(
            name="buildjob" + resourceid, mount_path="/var/run")
        mount2 = client.V1VolumeMount(
            name="kubeconfig", mount_path="/home/app/web/kubeconfig")
        container = client.V1Container(
            name="resourcejob" + resourceid,
            image=resourcejobimageid,
            volume_mounts=[mount1, mount2],
            command=[
                "python3", "-u", "app.py", serviceid, versionid,
                resourceid, versionname, namespace
            ],
            env=[{"name": "state_store", "value": state_store}],
            image_pull_policy="Always")
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={"resourcejob": "resourcejob" + resourceid}),
            spec=client.V1PodSpec(
                restart_policy="Never",
                containers=[container],
                volumes=[volume1, volume2]))
        # Create the specification of the job
        spec = client.V1JobSpec(template=template, backoff_limit=0)
        # Instantiate the job object
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="resourcejob" + resourceid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(
            body=job, namespace=namespace)
        success_message = resourceid + " Deploy Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = resourceid + " Failed to Initiate Deploy Job"
        return ("error", error_message, str(Error))

def create_job_object(job_arguments, size, docker_image, docker_image_tag,
                      affinity):
    user = os.environ['USER']
    # Generate the name once so the Job and its pod template agree
    # (calling uuid.uuid1() twice would produce two different names)
    job_name = 'kaml-remote-{}-{}'.format(user, uuid.uuid1())

    job = client.V1Job(
        metadata=client.V1ObjectMeta(name=job_name),
        spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                name=job_name, labels={'type': size}),
            spec=client.V1PodSpec(
                containers=[
                    client.V1Container(
                        name='kaml-remote',
                        args=job_arguments,
                        image='{}:{}'.format(docker_image, docker_image_tag),
                        image_pull_policy='Always',
                        env=[client.V1EnvVar(name='KAML_HOME', value='/app')],
                        volume_mounts=[
                            client.V1VolumeMount(
                                name='kaml-cfg-volume',
                                read_only=True,
                                mount_path='/app/kaml.cfg',
                                sub_path='kaml.cfg'),
                            client.V1VolumeMount(
                                name='gcp-service-account',
                                read_only=True,
                                mount_path='/app/service-key.json',
                                sub_path='service-key.json'),
                        ])
                ],
                affinity=affinity,
                volumes=[
                    client.V1Volume(
                        name='kaml-cfg-volume',
                        config_map=client.V1ConfigMapVolumeSource(
                            name='kaml-cfg')),
                    client.V1Volume(
                        name='gcp-service-account',
                        secret=client.V1SecretVolumeSource(
                            secret_name='gcp-service-account',
                            items=[
                                client.V1KeyToPath(
                                    key='service-key.json',
                                    path='service-key.json')
                            ])),
                ],
                restart_policy='Never'))))

    return job

def createJob(self, evaluation_job):
    logging.info("Creating evaluation job: %s for experiment: %s",
                 evaluation_job.uuid, evaluation_job.experiment_id)
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=evaluation_job.uuid),
        spec=client.V1JobSpec(
            template=self._create_pod_template_spec_for_job(evaluation_job),
            backoff_limit=0,
            completions=1,
            parallelism=1))
    logging.info("Creating evaluation job on kubernetes.")
    self.try_create_job(job)

def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)

    # Configure Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[
            k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')
        ],
        env=[
            k8s_client.V1EnvVar(name="AWS_REGION", value=region),
            k8s_client.V1EnvVar(
                name="AWS_ACCESS_KEY_ID",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
            k8s_client.V1EnvVar(
                name="AWS_SECRET_ACCESS_KEY",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_SECRET_ACCESS_KEY", name="aws-secret"))),
        ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name))
    # Create and configure a spec section
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app": "copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(
            containers=[container],
            volumes=[volume],
            restart_policy="OnFailure"))
    # Create the specification of the job
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app": "copy-dataset-worker"}),
        template=template)
    # Instantiate the job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
    return job

def deleteJob(kJobname, podName, kNameSpace):
    # Prefer in-cluster config; fall back to a local kubeconfig
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config('.kube/config')

    batchV1 = client.BatchV1Api()
    batchV1.delete_namespaced_job(kJobname, kNameSpace)
    print("Job deleted: " + kJobname)

    coreV1 = client.CoreV1Api()
    coreV1.delete_namespaced_pod(podName, kNameSpace)
    print("Pod deleted: " + podName)
    return

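# Alternative sketch (an assumption, not the original author's method):
# deleting the Job with a propagation policy lets Kubernetes garbage-collect
# the Job's pods, so no separate delete_namespaced_pod call is needed.
from kubernetes import client, config

def delete_job_with_pods(job_name, namespace):
    config.load_kube_config()
    batch_v1 = client.BatchV1Api()
    batch_v1.delete_namespaced_job(
        job_name,
        namespace,
        body=client.V1DeleteOptions(propagation_policy="Foreground"))
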
def _get_job_object(self, algorithm):
    job = client.V1Job()

    # Define job metadata
    job.metadata = client.V1ObjectMeta(
        namespace=NAMESPACE, name=self.resources_identifier)

    # Define job spec
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()

    env_list = [
        client.V1EnvVar(
            name=ENV_VAR_JOB_NAME, value=self.resources_identifier)
    ]

    docker_repo = os.environ.get(ENV_VAR_DOCKER_REPOSITORY, "")
    if docker_repo != "":
        image_name = f"{docker_repo}/{algorithm}:latest"
    else:
        image_name = f"{algorithm}:latest"

    volume_mounts = [
        client.V1VolumeMount(
            name=VOLUME_NAME_ALGORITHM_INPUT, mount_path="/etc/config")
    ]
    container = client.V1Container(
        name="algorithm",
        image=image_name,
        volume_mounts=volume_mounts,
        env=env_list,
        image_pull_policy="Always",
        # command=["sleep", "5"],
    )

    cm_mount = client.V1ConfigMapVolumeSource(name=self.resources_identifier)
    volumes = [
        client.V1Volume(
            config_map=cm_mount, name=VOLUME_NAME_ALGORITHM_INPUT)
    ]

    template.template.spec = client.V1PodSpec(
        containers=[container], restart_policy='Never', volumes=volumes)
    job.spec = client.V1JobSpec(
        ttl_seconds_after_finished=1200, template=template.template)

    return job

def create_job(self):
    job_metadata = client.V1ObjectMeta(
        name=f"mc-job-{self._job_parameters['job_number']}-{self._job_parameters['job_id']}",
        labels=self._job_labels,
    )
    job = client.V1Job(
        spec=client.V1JobSpec(
            backoff_limit=0, template=self.create_pod_template()),
        metadata=job_metadata,
        kind="Job",
        api_version="batch/v1",
    )
    return job

def create_job_manifest(self):
    image: str = self.mlcube.platform.container.image
    logging.info(f"Using image: {image}")

    container_args: List[str] = []
    container_volume_mounts: Dict = dict()
    container_volumes: Dict = dict()

    container_args.append(self.mlcube.invoke.task_name)
    self.binding_to_volumes(self.mlcube.invoke.input_binding, container_args,
                            container_volume_mounts, container_volumes)
    self.binding_to_volumes(self.mlcube.invoke.output_binding, container_args,
                            container_volume_mounts, container_volumes)

    logging.info("Using container arguments: %s" % container_args)

    container = client.V1Container(
        name="mlcube-container",
        image=image,
        args=container_args,
        volume_mounts=list(container_volume_mounts.values()))

    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={
            "app": "mlcube",
            "app-name": self.mlcube.name,
        }),
        spec=client.V1PodSpec(
            restart_policy="Never",
            containers=[container],
            volumes=list(container_volumes.values())))

    job_spec = client.V1JobSpec(template=pod_template, backoff_limit=4)

    self.mlcube_job_manifest = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(
            generate_name="mlcube-" + self.mlcube.name + "-"),
        spec=job_spec,
    )

    logging.info("The MLCube Kubernetes Job manifest: %s"
                 % self.mlcube_job_manifest)

def getJobBody(namespace='couture-console',
               jobname='nifi-test',
               containername='nifi-test',
               containerimage='sidharthc/nifi-test:alpha',
               env_vars=ENV_LIST,
               containerargs=['SFTP_TO_HDFS.py']):
    body = client.V1Job(api_version="batch/v1", kind="Job")
    # Body needs Metadata
    # Attention: each Job must have a different name!
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=jobname)
    # And a Status
    body.status = client.V1JobStatus()
    # Now we start with the Template...
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))
    container = client.V1Container(
        name=containername,
        image=containerimage,
        args=containerargs,
        env=env_list)
    template.template.spec = client.V1PodSpec(
        containers=[container], restart_policy='Never')
    # And finally we can create our V1JobSpec!
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=100, template=template.template)
    return body

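# Illustrative default (an assumption; ENV_LIST is not defined in the
# original source): a plain name -> value mapping consumed by getJobBody.
# It must be defined before getJobBody, since the default is bound at
# function-definition time. The values here are placeholders.
ENV_LIST = {
    'HDFS_HOST': 'hdfs-namenode.couture-console.svc',
    'SFTP_HOST': 'sftp.example.com',
}
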
def _create_job_request(job_name,
                        container,
                        namespace=get_default_target_namespace()):
    # =====
    # HACK: disable Istio sidecar injection, since an injected sidecar
    # would keep the Job's pod from ever reaching completion
    ann = {"sidecar.istio.io/inject": "false"}
    # =====

    # Create the Job request body
    body = client.V1Job(
        metadata=client.V1ObjectMeta(namespace=namespace, name=job_name),
        spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    namespace=namespace, annotations=ann),
                spec=client.V1PodSpec(
                    containers=[container], restart_policy="Never"))))

    return body

def generate_deployment_spec(self, pod_template_spec):
    """Generate a V1Job initialized with the correct completions and
    parallelism (for HP search) and with the provided V1PodTemplateSpec."""
    if not isinstance(pod_template_spec, k8s_client.V1PodTemplateSpec):
        raise TypeError(
            "pod_template_spec must be a V1PodTemplateSpec, but got %s"
            % type(pod_template_spec))

    job_spec = k8s_client.V1JobSpec(
        template=pod_template_spec,
        parallelism=self.runs,
        completions=self.runs)

    return k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(
            generate_name=self.job_name,
            labels=self.labels,
        ),
        spec=job_spec)

def _create_job_object(self,
                       name: str,
                       container_image: str,
                       namespace: str = None,
                       container_name: str = "servicecontainer",
                       env_vars: dict = None,
                       command: list = None,
                       active_deadline_seconds: int = 3600):
    # Avoid mutable default arguments
    env_vars = env_vars or {}
    command = command or []

    namespace = self._get_namespace(namespace)

    body = client.V1Job(api_version="batch/v1", kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()

    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()

    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))

    container = client.V1Container(
        name=container_name,
        image=container_image,
        env=env_list,
        command=command)
    template.template.spec = client.V1PodSpec(
        containers=[container], restart_policy='Never')

    # Set active_deadline_seconds
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=600,
        template=template.template,
        active_deadline_seconds=active_deadline_seconds)

    return body

def create_job_object(name, command, args_command, deployment_specs_container,
                      deployment_specs, cpu_request, memory_request,
                      cpu_limit, memory_limit):
    try:
        # Configure Pod template container
        container = client.V1Container(
            name=name,
            image=deployment_specs_container.image,
            env=deployment_specs_container.env,
            command=command.split(" "),
            args=[args_command],
            liveness_probe=deployment_specs_container.liveness_probe,
            ports=deployment_specs_container.ports,
            readiness_probe=deployment_specs_container.readiness_probe,
            volume_mounts=deployment_specs_container.volume_mounts,
            resources=client.V1ResourceRequirements(
                # minimum amount of compute resources required
                requests={"cpu": cpu_request, "memory": memory_request},
                # maximum amount of compute resources allowed
                limits={"cpu": cpu_limit, "memory": memory_limit}
            )
        )
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": name}),
            spec=client.V1PodSpec(
                restart_policy="Never",
                containers=[container],
                volumes=deployment_specs.volumes,
                init_containers=deployment_specs.init_containers,
                service_account_name=deployment_specs.service_account_name))
        # Create the specification of the job
        spec = client.V1JobSpec(template=template, backoff_limit=4)
        # Instantiate the job object
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name=name),
            spec=spec)
        return job
    except ApiException as e:
        LOG.error("Exception: %s\n" % e)
        sys.exit(1)

def get_job_object(submission_pk, spec):
    """Function to instantiate the AWS EKS Job object

    Arguments:
        submission_pk {[int]} -- Submission id
        spec {[V1JobSpec]} -- Specification of deployment of job

    Returns:
        [AWS EKS Job class object] -- AWS EKS Job class object
    """
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(
            name="submission-{0}".format(submission_pk)),
        spec=spec,
    )
    return job

def create_job_object():
    # Configure Pod template container
    container = client.V1Container(
        name='busybox', image='busybox', args=['sleep', '6'])
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={'name': 'simple-job'}),
        spec=client.V1PodSpec(
            restart_policy='OnFailure', containers=[container]))
    # Create the specification of the job
    spec = client.V1JobSpec(template=template)
    # Instantiate the job object
    job = client.V1Job(
        api_version='batch/v1',
        kind='Job',
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)
    return job

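# For reference (a sketch, assuming JOB_NAME = 'simple-job'): the object
# built above serializes to roughly this manifest.
#
# apiVersion: batch/v1
# kind: Job
# metadata:
#   name: simple-job
# spec:
#   template:
#     metadata:
#       labels:
#         name: simple-job
#     spec:
#       restartPolicy: OnFailure
#       containers:
#       - name: busybox
#         image: busybox
#         args: ["sleep", "6"]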