def create_workflow_object():
    """Creates the diamond DAG from the workflow examples.

    Looks something like this, where we flow from left to right:
    one -> (two, three) -> four
    """
    container = client.V1Container(
        name="busybox",
        image="gcr.io/google-containers/busybox",
        command=[
            "sh", "-c",
            "echo Starting on: $(date); sleep 5; "
            "echo Goodbye cruel world at: $(date)"
        ])
    stepone = DagV1WorkflowStep(
        name="stepone",
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepone"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    steptwo = DagV1WorkflowStep(
        name="steptwo",
        dependencies=['stepone'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "steptwo"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    stepthree = DagV1WorkflowStep(
        name="stepthree",
        dependencies=['stepone'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepthree"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    stepfour = DagV1WorkflowStep(
        name="stepfour",
        dependencies=['steptwo', 'stepthree'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepfour"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    workflow = DagV1Workflow(
        metadata=client.V1ObjectMeta(name="diamond"),
        spec=DagV1WorkflowSpec(
            selector=client.V1LabelSelector(
                match_labels={"workflow": "diamond"}),
            steps=[stepone, steptwo, stepthree, stepfour]))
    return workflow
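# A hedged submission sketch (not from the original source): DagV1Workflow is a
# custom resource model, so one way to create it is to serialize the object and
# post it through CustomObjectsApi. The group / version / plural values below
# are placeholders and must match however the Workflow CRD is actually
# registered in the target cluster.
def submit_workflow_sketch(workflow, namespace="default"):
    api_client = client.ApiClient()
    body = api_client.sanitize_for_serialization(workflow)  # model -> plain dict
    return client.CustomObjectsApi(api_client).create_namespaced_custom_object(
        group="example.com",   # placeholder CRD group
        version="v1",          # placeholder CRD version
        namespace=namespace,
        plural="workflows",    # placeholder CRD plural
        body=body)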
def create_k8s_job(self, job: KubernetesCMDJob) -> client.V1Job:
    volume_mount = client.V1VolumeMount(name='download-volume',
                                        mount_path=DEFAULT_PROJECT_PATH)
    image = job.job_config.properties.get('ai_flow_worker_image')
    if isinstance(job.exec_cmd, List):
        cmd = job.exec_cmd
    else:
        cmd = [job.exec_cmd]
    working_dir = KubernetesJobPlugin.get_container_working_dir(job)
    job_container = client.V1Container(name='cmd-job',
                                       image=image,
                                       image_pull_policy='Always',
                                       command=cmd,
                                       working_dir=working_dir,
                                       volume_mounts=[volume_mount])
    pod_spec = KubernetesJobPlugin.create_init_container(job, volume_mount, job_container)
    labels = {'app': 'ai-flow', 'component': 'cmd-job-' + str(job.instance_id)}
    object_meta = client.V1ObjectMeta(
        labels=labels,
        annotations={ANNOTATION_WATCHED: 'True',
                     ANNOTATION_JOB_ID: str(job.instance_id),
                     ANNOTATION_JOB_UUID: str(job.uuid),
                     ANNOTATION_WORKFLOW_ID: str(job.job_context.workflow_execution_id)})
    template_spec = client.V1PodTemplateSpec(metadata=object_meta, spec=pod_spec)
    job_spec = client.V1JobSpec(template=template_spec, backoff_limit=0)
    object_meta = client.V1ObjectMeta(labels=labels, name=self.generate_job_name(job))
    job = client.V1Job(metadata=object_meta,
                       spec=job_spec,
                       api_version='batch/v1',
                       kind='Job')
    return job
def create_job_manifest(envs, commands, name, image, template_file):
    if template_file is not None:
        with open(template_file) as f:
            job = yaml.safe_load(f)
        job["metadata"]["name"] = name
        job["spec"]["template"]["metadata"]["labels"]["app"] = name
        job["spec"]["template"]["spec"]["containers"][0]["image"] = image
        job["spec"]["template"]["spec"]["containers"][0]["name"] = name
        job["spec"]["template"]["spec"]["containers"][0]["env"] = envs
        job["spec"]["template"]["spec"]["containers"][0]["command"] = commands
    else:
        container = client.V1Container(image=image,
                                       command=commands,
                                       name=name,
                                       env=envs)
        pod_temp = client.V1PodTemplateSpec(
            spec=client.V1PodSpec(restart_policy="OnFailure",
                                  containers=[container]),
            metadata=client.V1ObjectMeta(name=name, labels={"app": name}))
        job = client.V1Job(api_version="batch/v1",
                           kind="Job",
                           spec=client.V1JobSpec(template=pod_temp),
                           metadata=client.V1ObjectMeta(name=name))
    return job
def create_job_object():
    # Configure the Pod template container.
    # Volume inside of which you put your service account;
    # this is the secret name, basically.
    volume_name = "google-cloud-json"
    google_app_credentials_path = os.environ.get(
        'GOOGLE_APPLICATION_CREDENTIALS')

    # Create a volume mount
    volume_mount = client.V1VolumeMount(mount_path='/etc/stuff',
                                        name=volume_name)

    # Create environment variables for the container.
    # In this case, grab the values from the execution environment,
    # perhaps using something like a .env file.
    env = [
        client.V1EnvVar(
            name='GOOGLE_APPLICATION_CREDENTIALS',
            # note this is a path + the filename of the app creds in the secret
            value='/etc/stuff/key.json'),  # google_app_credentials_path),
        client.V1EnvVar(name='GCS_STAGING_URL',
                        value=os.environ.get('GCS_STAGING_URL')),
        client.V1EnvVar(name='GCS_EXPORT_URL',
                        value=os.environ.get('GCS_EXPORT_URL')),
        client.V1EnvVar(name='ES_HOST', value=os.environ.get('ES_HOST'))
    ]

    # Create a volume.
    # This will go into the spec section of the template.
    # Note that this specifies a secret volume;
    # the secret needs to be created separately.
    volume = client.V1Volume(
        name=volume_name,
        secret=client.V1SecretVolumeSource(secret_name='google-cloud-json'))

    # Create the container section that will go into the spec section
    container = client.V1Container(
        name="omicidx-builder",
        image="seandavi/omicidx-builder",
        volume_mounts=[volume_mount],
        env=env,
        command=['/bin/bash', '/code/biosample_pipeline.sh'])

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "omicidx-builder"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              volumes=[volume],
                              containers=[container]))

    # Create the specification of the deployment
    spec = client.V1JobSpec(template=template, backoff_limit=4)

    # Instantiate the job object
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=JOB_NAME),
                       spec=spec)

    return job
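# A minimal usage sketch (not part of the original snippet): assuming kube
# configuration has already been loaded (e.g. via config.load_kube_config())
# and JOB_NAME is defined, the V1Job returned above could be submitted and
# later cleaned up with the standard BatchV1Api calls shown here.
def run_job_sketch(namespace="default"):
    batch_api = client.BatchV1Api()
    job = create_job_object()
    # Submit the Job to the cluster.
    created = batch_api.create_namespaced_job(body=job, namespace=namespace)
    print("Job created. status='%s'" % str(created.status))
    # Delete the Job (and its pods, via foreground propagation) when done.
    batch_api.delete_namespaced_job(
        name=JOB_NAME,
        namespace=namespace,
        body=client.V1DeleteOptions(propagation_policy="Foreground"))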
def create_job_object(
        self,
        job_name,
        container_image,
        args,
        envs,
        resources,
        label_key,
        label_value,
        backoff_limit=Constants.BACKOFF_LIMIT,
):
    try:
        pod_template_spec = self.create_pod_template_spec(
            job_name, container_image, args, envs, resources)

        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={label_key: label_value}),
            spec=pod_template_spec)

        # Create the specification of the deployment
        spec = client.V1JobSpec(template=template,
                                backoff_limit=backoff_limit,
                                ttl_seconds_after_finished=3 * 24 * 60 * 60)

        # Instantiate the job object
        job = client.V1Job(api_version="batch/v1",
                           kind="Job",
                           metadata=client.V1ObjectMeta(name=job_name),
                           spec=spec)
        return job
    except Exception as error:
        # Avoid a bare except so the failure cause is not silently lost.
        print('error create_job_object: %s' % error)
def create(self, job_spec=None, pod_spec=None):
    """Creates and runs the job on the cluster.

    Args:
        job_spec: A dictionary of keyword arguments that will be passed to V1JobSpec()
        pod_spec: A dictionary of keyword arguments that will be passed to V1PodSpec()

    Returns:
        A dictionary containing the results of creating the job on the cluster.
    """
    if job_spec is None:
        job_spec = self.job_spec
    if pod_spec is None:
        pod_spec = self.pod_spec
    if not self._containers:
        raise ValueError(
            "Containers not found. "
            "Use add_containers() to specify containers before creating the job."
        )
    # TODO: Set the backoff limit to 1. There will be no retry if the job fails.
    # Convert job name to lower case
    job_name = str(self.job_name).lower()
    job_body = client.V1Job(kind="Job")
    job_body.metadata = client.V1ObjectMeta(namespace=self.namespace, name=job_name)
    job_body.status = client.V1JobStatus()
    template = pod_template(self._containers, self._volumes, **pod_spec)
    job_body.spec = client.V1JobSpec(template=template.template, **job_spec)
    self.creation_response = api_request(api.create_namespaced_job,
                                         self.namespace, job_body)
    return self.creation_response
def generate_deployment_spec(self, pod_template_spec):
    """Generate a V1Job initialized with correct completion and
    parallelism (for HP search) and with the provided V1PodTemplateSpec.

    :param pod_template_spec: V1PodTemplateSpec
    """
    if not isinstance(pod_template_spec, k8s_client.V1PodTemplateSpec):
        raise TypeError("pod_template_spec must be a V1PodTemplateSpec, "
                        "but got %s" % type(pod_template_spec))

    job_spec = k8s_client.V1JobSpec(
        template=pod_template_spec,
        parallelism=self.runs,
        completions=self.runs,
        backoff_limit=0,
    )

    return k8s_client.V1Job(api_version="batch/v1",
                            kind="Job",
                            metadata=k8s_client.V1ObjectMeta(
                                name=self.job_name,
                                generate_name=constants.JOB_DEFAULT_NAME,
                                labels=self.labels),
                            spec=job_spec)
def make_job_object(filename, filepath, id_training):
    # Build the environment for the worker container from module-level
    # MinIO settings plus the per-call arguments.
    env_vars = {
        "ENDPOINT": ENDPOINT,
        "MINIO_ACCESS_KEY": MINIO_ACCESS_KEY,
        "MINIO_SECRET_KEY": MINIO_SECRET_KEY,
        "BUCKET": BUCKET,
        "FILENAME": str(filename),
        "FILEPATH": str(filepath),
        "ID_TRAINING": str(id_training),
    }
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))

    container = client.V1Container(
        name="estratto",
        image="ziofededocker/dummy:latest",
        command=["python", "/app/estrazione.py"],
        env=env_list,
        image_pull_policy="IfNotPresent")
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "estratto"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
    spec = client.V1JobSpec(template=template, backoff_limit=0)
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name="estrazione-job"),
        spec=spec)
    return job
def create_job_object(self, job_name, container_image, args):
    # Volume inside of which you put your service account.
    # NOTE: the original snippet leaves this name empty; it must be set to a
    # valid volume name before the job can be created.
    volume_name = ""
    google_app_credentials_path = os.environ.get(
        'GOOGLE_APPLICATION_CREDENTIALS')
    volume_mount = client.V1VolumeMount(
        mount_path='/'.join(google_app_credentials_path.split('/')[:-1]),
        name=volume_name)
    env = client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                          value=google_app_credentials_path)
    container = client.V1Container(name=job_name,
                                   image=container_image,
                                   args=args,
                                   volume_mounts=[volume_mount],
                                   env=[env],
                                   image_pull_policy="Always")
    volume = client.V1Volume(
        name=volume_name,
        secret=client.V1SecretVolumeSource(
            secret_name='<secret-where-you-put-the-service-account>'))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "sample"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              containers=[container],
                              volumes=[volume]))
    spec = client.V1JobSpec(template=template,
                            backoff_limit=3,
                            ttl_seconds_after_finished=60)
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=job_name),
                       spec=spec)
    return job
def create_job_object(message, environment_image):
    """Function to create the AWS EKS Job object

    Arguments:
        message {[dict]} -- Submission message from AWS SQS queue

    Returns:
        [AWS EKS Job class object] -- AWS EKS Job class object
    """
    PYTHONUNBUFFERED_ENV = client.V1EnvVar(name="PYTHONUNBUFFERED", value="1")
    AUTH_TOKEN_ENV = client.V1EnvVar(name="AUTH_TOKEN", value=AUTH_TOKEN)
    EVALAI_API_SERVER_ENV = client.V1EnvVar(
        name="EVALAI_API_SERVER", value=EVALAI_API_SERVER
    )
    MESSAGE_BODY_ENV = client.V1EnvVar(name="BODY", value=json.dumps(message))
    submission_pk = message["submission_pk"]
    image = message["submitted_image_uri"]
    # Configure the Pod agent container
    agent_container = client.V1Container(
        name="agent", image=image, env=[PYTHONUNBUFFERED_ENV]
    )
    volume_mount_list = get_volume_mount_list("/dataset")
    # Configure the Pod environment container
    environment_container = client.V1Container(
        name="environment",
        image=environment_image,
        env=[
            PYTHONUNBUFFERED_ENV,
            AUTH_TOKEN_ENV,
            EVALAI_API_SERVER_ENV,
            MESSAGE_BODY_ENV,
        ],
        resources=client.V1ResourceRequirements(
            limits={"nvidia.com/gpu": "1"}
        ),
        volume_mounts=volume_mount_list,
    )
    volume_list = get_volume_list()
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "evaluation"}),
        spec=client.V1PodSpec(
            containers=[environment_container, agent_container],
            restart_policy="Never",
            volumes=volume_list,
        ),
    )
    # Create the specification of deployment
    spec = client.V1JobSpec(backoff_limit=1, template=template)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(
            name="submission-{0}".format(submission_pk)
        ),
        spec=spec,
    )
    return job
def create_job_object(env_vars={"ENDPOINT": "http://172.17.0.1:9000",
                                "MINIO_ACCESS_KEY": "admin",
                                "MINIO_SECRET_KEY": "keystone"}):
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))

    container = client.V1Container(
        name="estratto",
        image="ziofededocker/estrazionefile:latest",
        command=["python", "/app/estrazione.py"],
        env=env_list,
        image_pull_policy="IfNotPresent")
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "estratto"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
    spec = client.V1JobSpec(template=template, backoff_limit=0)
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)
    return job
def create_job_object(container_image, image_pull_secret=None, service_account_name=None):
    # Only attach an image pull secret if one was actually supplied.
    pull_secrets = ([client.V1LocalObjectReference(name=image_pull_secret)]
                    if image_pull_secret else None)
    # Configure the Pod template container
    container = client.V1Container(
        name="installer",
        image=container_image,
        command=list(install_command()))
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "mcm-installer"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              containers=[container],
                              image_pull_secrets=pull_secrets,
                              service_account_name=service_account_name))
    # Create the specification of deployment
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=1)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)
    return job
def create_cronjob(self, schedule: str, pipeline_uuid: str) -> str:
    run_job_endpoint = (f'{kalytical_config.kalytical_api_endpoint}'
                        f'/pipeline/dispatcher/run_by_pipeline_uuid?pipeline_uuid={pipeline_uuid}')
    job_name = f'kalytical-api-trigger-{pipeline_uuid}'
    container = client.V1Container(
        name=job_name,
        image=kalytical_config.ext_cron_image_uri,
        env=[client.V1EnvVar(name='KALYTICAL_API_ENDPOINT', value=run_job_endpoint),
             client.V1EnvVar(name='KALYTICAL_API_AUTH_SECRET', value=kalytical_config.kalytical_api_token)],
        resources=client.V1ResourceRequirements(limits={'cpu': '.1', 'memory': '50Mi'}))

    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={'kalytical-api-pipeline': job_name}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))

    job_spec = client.V1JobSpec(
        completions=1, backoff_limit=0, template=pod_template)
    job_template = client.V1beta1JobTemplateSpec(spec=job_spec)
    cron_spec = client.V1beta1CronJobSpec(job_template=job_template, schedule=schedule)
    cron_body = client.V1beta1CronJob(
        spec=cron_spec,
        metadata=client.V1ObjectMeta(name=job_name))

    try:
        self.log.debug(f"Attempting to write namespaced cronjob with namespace={self._k8s_namespace} parameters={str(cron_body)}")
        self._k8s_batch_client.create_namespaced_cron_job(
            namespace=self._k8s_namespace, body=cron_body)
    except ApiException as e:
        if e.status == 409:
            self.log.warn("This job already existed. We will re-create it.")
            self.delete_cronjob(job_name=job_name)  # TODO Instead use patching
            self.create_cronjob(schedule=schedule, pipeline_uuid=pipeline_uuid)
        else:
            raise e
    return job_name
def create_cronjob(username, namespace, dbhost):
    try:
        config.load_kube_config()
    except Exception:
        config.load_incluster_config()
    api = client.BatchV1beta1Api()

    body = client.V1beta1CronJob(
        metadata=client.V1ObjectMeta(name=namespace),
        spec=client.V1beta1CronJobSpec(
            job_template=client.V1beta1JobTemplateSpec(
                spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
                    spec=client.V1PodSpec(
                        containers=[
                            client.V1Container(
                                name="scheduler",
                                image="sahandha/lsstscheduler",
                                args=["/bin/bash", "-c",
                                      "python /sched.py {} {} {};".format(username, namespace, dbhost)],
                                resources=client.V1ResourceRequirements(
                                    requests={'memory': "200Mi", 'cpu': "100m"}))
                        ],
                        restart_policy="OnFailure")))),
            schedule="*/1 * * * *"))
    try:
        api.create_namespaced_cron_job(namespace, body)
    except ApiException as e:
        print("Exception when calling BatchV1beta1Api->create_namespaced_cron_job: %s\n" % e)
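# A hedged companion sketch (not in the original source): once the CronJob
# above exists, the same BatchV1beta1Api can pause or remove it. The helper
# name and the idea of toggling the `suspend` flag are illustrative
# assumptions; patch_namespaced_cron_job and delete_namespaced_cron_job are
# standard client calls.
def suspend_or_delete_cronjob(name, namespace, suspend=True, delete=False):
    api = client.BatchV1beta1Api()
    if delete:
        # Remove the CronJob entirely.
        api.delete_namespaced_cron_job(name=name, namespace=namespace)
    else:
        # Patch only the `suspend` flag so future runs are skipped.
        patch = {"spec": {"suspend": suspend}}
        api.patch_namespaced_cron_job(name=name, namespace=namespace, body=patch)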
def create_job(self, name, image, cmd, path):
    container = client.V1Container(
        name=name,
        image=image,
        env=[client.V1EnvVar(name='PYTHONUNBUFFERED', value='0')],
        command=cmd,
        volume_mounts=[
            client.V1VolumeMount(
                name=name + "-volume",
                mount_path="/root",
            )
        ])
    volume = client.V1Volume(
        name=name + "-volume",
        host_path=client.V1HostPathVolumeSource(path=path))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=name, labels={"user": self.user}),
        spec=client.V1PodSpec(
            restart_policy="Never",
            containers=[container],
            volumes=[volume],
        ))
    spec = client.V1JobSpec(template=template)
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=name),
                       spec=spec)
    client.BatchV1Api().create_namespaced_job(namespace=self.namespace, body=job)
def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)

    # Configure the Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
        env=[k8s_client.V1EnvVar(name="AWS_REGION", value=region),
             k8s_client.V1EnvVar(name="AWS_ACCESS_KEY_ID",
                                 value_from=k8s_client.V1EnvVarSource(
                                     secret_key_ref=k8s_client.V1SecretKeySelector(
                                         key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
             k8s_client.V1EnvVar(name="AWS_SECRET_ACCESS_KEY",
                                 value_from=k8s_client.V1EnvVarSource(
                                     secret_key_ref=k8s_client.V1SecretKeySelector(
                                         key="AWS_SECRET_ACCESS_KEY", name="aws-secret")))
             ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
    )
    # Create and configure a spec section
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app": "copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(containers=[container],
                                  volumes=[volume],
                                  restart_policy="OnFailure"))
    # Create the specification of the job
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app": "copy-dataset-worker"}),
        template=template)
    # Instantiate the job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
    return job
def generate_app_migrator_job(self, tag: str, source: str):
    log.debug("Generating app-migrator job: tag={} source={}".format(tag, source))
    deployment = self.appsV1Api.read_namespaced_deployment(source, self.namespace)
    metadata = client.V1ObjectMeta(
        labels={"app": APP_MIGRATOR},
        name=APP_MIGRATOR,
        namespace=self.namespace
    )
    new_image = generate_image(
        old_image=deployment.spec.template.spec.containers[0].image,
        new_tag=tag
    )
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=metadata,
        spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                spec=deployment.spec.template.spec,
                metadata=metadata
            )
        ),
    )
    job.spec.template.spec.containers[0].image = new_image
    job.spec.template.spec.restart_policy = "Never"
    job.spec.template.spec.containers[0].command = config.APP_MIGRATOR_COMMAND
    job.spec.template.spec.containers[0].args = config.APP_MIGRATOR_ARGS
    job.spec.template.spec.containers[0].resources = client.V1ResourceRequirements()
    self.batchV1Api.create_namespaced_job(self.namespace, job)
    log.debug(
        "Generation of app-migrator job complete: tag={} source={}".format(
            tag, source
        )
    )
def create_job_manifest(n_comp, n_para):
    # Build a Kubernetes Job based on the arguments passed in.
    container = client.V1Container(
        name="job-worker",
        image="masato0921/tutorial-docker-kubernetes:chapter_010_006",
        env=[
            client.V1EnvVar(name="BROKER_URL",
                            value="amqp://*****:*****@task-queue:5672"),
            client.V1EnvVar(name="QUEUE", value="task-queue")
        ])
    template = client.V1PodTemplateSpec(
        spec=client.V1PodSpec(containers=[container], restart_policy="Never"))
    spec = client.V1JobSpec(backoff_limit=4,
                            template=template,
                            completions=n_comp,
                            parallelism=n_para)
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=OBJECT_NAME),
                       spec=spec)
    return job
def getBody(namespace='couture-console',
            jobname='nifi-test',
            containername='nifi-test',
            containerimage='sidharthc/nifi-test:alpha',
            env_vars=ENV_LIST,
            containerargs=['SFTP_TO_HDFS.py']):
    body = client.V1Job(api_version="batch/v1", kind="Job")
    # Body needs Metadata
    # Attention: Each JOB must have a different name!
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=jobname)
    # And a Status
    body.status = client.V1JobStatus()
    # Now we start with the Template...
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))
    container = client.V1Container(name=containername,
                                   image=containerimage,
                                   args=containerargs,
                                   env=env_list)
    template.template.spec = client.V1PodSpec(containers=[container],
                                              restart_policy='Never')
    # And finally we can create our V1JobSpec!
    body.spec = client.V1JobSpec(ttl_seconds_after_finished=100,
                                 template=template.template)
    return body
def kube_create_job_object(name, container_image, namespace="default",
                           container_name="jobcontainer", env_vars={}):
    # Body is the object Body
    body = client.V1Job(api_version="batch/v1", kind="Job")
    # Body needs Metadata
    # Attention: Each JOB must have a different name!
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    # And a Status
    body.status = client.V1JobStatus()
    # Now we start with the Template...
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    # Passing Arguments in Env:
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))
    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=env_list)
    template.template.spec = client.V1PodSpec(containers=[container],
                                              restart_policy='Never')
    # And finally we can create our V1JobSpec!
    body.spec = client.V1JobSpec(ttl_seconds_after_finished=30,
                                 template=template.template)
    return body
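# Usage sketch (an assumption, not part of the original snippet): the body
# returned by kube_create_job_object() still has to be submitted. Something
# along these lines would do it, with ApiException imported from
# kubernetes.client.rest.
def kube_create_job_sketch(name, container_image, namespace="default", env_vars={}):
    body = kube_create_job_object(name, container_image,
                                  namespace=namespace, env_vars=env_vars)
    try:
        # Create the Job in the target namespace.
        api_response = client.BatchV1Api().create_namespaced_job(namespace, body)
        return api_response
    except ApiException as e:
        print("Exception when calling BatchV1Api->create_namespaced_job: %s\n" % e)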
def _create_job_object(self,
                       name: str,
                       container_image: str,
                       namespace: str = None,
                       container_name: str = "servicecontainer",
                       env_vars: dict = {},
                       command: list = [],
                       active_deadline_seconds: int = 3600):
    namespace = self._get_namespace(namespace)

    body = client.V1Job(api_version="batch/v1", kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))
    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=env_list,
                                   command=command)
    template.template.spec = client.V1PodSpec(containers=[container],
                                              restart_policy='Never')
    # Set active_deadline_seconds
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=600,
        template=template.template,
        active_deadline_seconds=active_deadline_seconds)
    return body
def create_notification_job(zip_code: int) -> client.V1Job:
    """Creates job to fulfill requested notification"""
    return bot.k8s_batch.create_namespaced_job(
        namespace=namespace,
        body=client.V1Job(
            api_version='batch/v1',
            kind='Job',
            metadata=client.V1ObjectMeta(generate_name=JOB_NAME_PREFIX),
            spec=client.V1JobSpec(
                ttl_seconds_after_finished=JOB_TTL_SECONDS_AFTER_FINISHED,
                backoff_limit=JOB_MAX_RETRIES,
                template=client.V1PodTemplateSpec(spec=client.V1PodSpec(
                    restart_policy=JOB_RESTART_POLICY,
                    containers=[
                        client.V1Container(
                            name='worker',
                            image=job_image,
                            resources=client.V1ResourceRequirements(
                                requests=JOB_RESOURCE_REQUESTS),
                            args=['--worker', '--zip_code', str(zip_code)],
                            env=[
                                client.V1EnvVar(name=key, value=value)
                                for key, value in {
                                    MONGO_USER: mongodb_user,
                                    MONGO_PASSWORD: mongodb_password,
                                    MONGO_HOST: mongodb_host,
                                    MONGO_PORT: mongodb_port,
                                    MY_TURN_API_KEY: my_turn_api_key
                                }.items()
                            ])
                    ])))))
def create_job_object(job_name, instance_id):
    # Configure the Pod template container
    container = kube_cli.V1Container(
        name="pi",
        image="perl",
        command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
    # Create and configure a spec section
    template = kube_cli.V1PodTemplateSpec(
        metadata=kube_cli.V1ObjectMeta(labels={"app": "pi"},
                                       annotations={
                                           'ai-flow/watched': 'True',
                                           'ai-flow/job-id': str(instance_id),
                                           'ai-flow/workflow-id': str(1)
                                       }),
        spec=kube_cli.V1PodSpec(restart_policy="Never",
                                containers=[container]))
    # Create the specification of deployment
    spec = kube_cli.V1JobSpec(template=template, backoff_limit=4)
    # Instantiate the job object
    job = kube_cli.V1Job(api_version="batch/v1",
                         kind="Job",
                         metadata=kube_cli.V1ObjectMeta(name=job_name),
                         spec=spec)
    return job
def create_clingo_job(prog_id):
    from kubernetes import config
    config.load_incluster_config()
    from kubernetes import client
    api_instance = client.BatchV1Api()
    namespace = 'default'
    body = client.V1Job(
        api_version='batch/v1',
        kind='Job',
        metadata=client.V1ObjectMeta(name='job-{}'.format(prog_id)),
        spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name='job-{}'.format(prog_id)),
            spec=client.V1PodSpec(
                restart_policy='Never',
                containers=[
                    client.V1Container(
                        name='aspaas-app',
                        image='gcr.io/{}/worker'.format(
                            current_app.config['PROJECT_ID']),
                        command=["python"],
                        args=["run.py", str(prog_id)],
                    )
                ],
            ),
        )),
    )
    api_instance.create_namespaced_job(namespace, body)
def create(self):
    job_spec_name = "{}spec".format(self.name)
    return client.V1JobSpec(
        template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name=job_spec_name),
            spec=self.create_pod_spec()
        )
    )
def request_trader(self, stockid):
    config.load_incluster_config()
    v1 = client.BatchV1Api()
    job = client.V1Job()
    job.metadata = client.V1ObjectMeta(name="trader{}".format(stockid))
    container = client.V1Container(name="trader{}".format(stockid),
                                   image="outk/trader:v0.0.1",
                                   image_pull_policy="IfNotPresent")
    container.env = [
        V1EnvVar(name="STOCK_ID", value=stockid),
        V1EnvVar(name="TRADE_PWD", value=TRADE_PWD),
        V1EnvVar(name="LOGIN_ID", value=LOGIN_ID),
        V1EnvVar(name="LOGIN_PASS", value=LOGIN_PASS),
        V1EnvVar(name="LOGIN_PAGE_ID", value=LOGIN_PAGE_ID),
        V1EnvVar(name="LOGIN_CONTROL_ID", value=LOGIN_CONTROL_ID),
        V1EnvVar(name="GET_PRICE_PAGE_ID", value=GET_PRICE_PAGE_ID),
        V1EnvVar(name="GET_PRICE_DATA_STORE_ID", value=GET_PRICE_DATA_STORE_ID),
        V1EnvVar(name="GET_PRICE_CONTROL_ID", value=GET_PRICE_CONTROL_ID),
        V1EnvVar(name="GET_BUYING_POWER_PAGE_ID", value=GET_BUYING_POWER_PAGE_ID),
        V1EnvVar(name="GET_BUYING_POWER_DATA_STORE_ID", value=GET_BUYING_POWER_DATA_STORE_ID),
        V1EnvVar(name="GET_BUYING_POWER_CONTROL_ID", value=GET_BUYING_POWER_CONTROL_ID),
        V1EnvVar(name="GET_STOCK_HOLDINGS_PAGE_ID", value=GET_STOCK_HOLDINGS_PAGE_ID),
        V1EnvVar(name="GET_STOCK_HOLDINGS_DATA_STORE_ID", value=GET_STOCK_HOLDINGS_DATA_STORE_ID),
        V1EnvVar(name="GET_STOCK_HOLDINGS_CONTROL_ID", value=GET_STOCK_HOLDINGS_CONTROL_ID),
        V1EnvVar(name="BUY_ORDER_PAGE_ID", value=BUY_ORDER_PAGE_ID),
        V1EnvVar(name="BUY_ORDER_CONTROL_ID", value=BUY_ORDER_CONTROL_ID),
        V1EnvVar(name="SELL_ORDER_PAGE_ID", value=SELL_ORDER_PAGE_ID),
        V1EnvVar(name="SELL_ORDER_CONTROL_ID", value=SELL_ORDER_CONTROL_ID),
        V1EnvVar(name="ORDER_CANCEL_PAGE_ID", value=ORDER_CANCEL_PAGE_ID),
        V1EnvVar(name="ORDER_CANCEL_CONTROL_ID", value=ORDER_CANCEL_CONTROL_ID)
    ]
    spec = client.V1PodSpec(containers=[container], restart_policy="Never")
    podtemplatespec = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name="trader{}".format(stockid)),
        spec=spec)
    jobspec = client.V1JobSpec(template=podtemplatespec,
                               ttl_seconds_after_finished=12 * 60 * 60)
    job.spec = jobspec
    v1.create_namespaced_job(namespace="trader", body=job)
    logging.info("trader{} job created.".format(stockid))
    return
def create_generator_job(kubeconfigpath, serviceid, namespace, archeplaydatapath, generatorpath):
    try:
        config.load_kube_config("/home/app/web/kubeconfig")
        batch_v1 = client.BatchV1Api()
        volume1 = client.V1Volume(
            name="generatorjob" + serviceid,
            host_path={"path": "/var/run"}
        )
        volume2 = client.V1Volume(
            name="kubeconfig",
            host_path={"path": kubeconfigpath}
        )
        volume3 = client.V1Volume(
            name="archeplaydata",
            host_path={"path": archeplaydatapath}
        )
        mount1 = client.V1VolumeMount(
            name="generatorjob" + serviceid,
            mount_path="/var/run"
        )
        mount2 = client.V1VolumeMount(
            name="kubeconfig",
            mount_path="/home/app/web/kubeconfig"
        )
        mount3 = client.V1VolumeMount(
            name="archeplaydata",
            mount_path="/home/app/web/archeplay/data"
        )
        container = client.V1Container(
            name="generatorjob" + serviceid,
            image="python:3.8.1-slim-buster",
            volume_mounts=[mount1, mount2, mount3],
            command=["bash", generatorpath],
            image_pull_policy="Always"
        )
        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"generatorjob": "generatorjob" + serviceid}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  containers=[container],
                                  volumes=[volume1, volume2, volume3]))
        # Create the specification of the job
        spec = client.V1JobSpec(
            template=template,
            backoff_limit=0
        )
        # Instantiate the job object
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name="generatorjob" + serviceid),
            spec=spec)
        api_response = batch_v1.create_namespaced_job(
            body=job,
            namespace=namespace)
        success_message = "Generator Job Initiated"
        return ("success", success_message, str(api_response.status))
    except Exception as Error:
        error_message = "Generator Job Failed to Initiate"
        return ("error", error_message, str(Error))
def dispatch(endpoint, access_key, secret_key, bucket_name, object_name):
    job_name = f"local-rebuild-{uuid.uuid1()}"

    downloader_env = [
        client.V1EnvVar(name="ENDPOINT", value=endpoint),
        client.V1EnvVar(name="ACCESS_KEY", value=access_key),
        client.V1EnvVar(name="SECRET_KEY", value=secret_key),
        client.V1EnvVar(name="BUCKET_NAME", value=bucket_name),
        client.V1EnvVar(name="OBJECT_NAME", value=object_name)
    ]

    downloader_container = client.V1Container(
        name="downloader",
        image=os.getenv("DOWNLOADER_IMAGE"),
        env=downloader_env,
        volume_mounts=[client.V1VolumeMount(name="processor-input",
                                            mount_path="/output")])

    processor_container = client.V1Container(
        name="processor",
        image=os.getenv("PROCESSOR_IMAGE"),
        volume_mounts=[client.V1VolumeMount(name="processor-input",
                                            mount_path="/input",
                                            read_only=True),
                       client.V1VolumeMount(name="processor-output",
                                            mount_path="/output")])

    pod_spec = client.V1PodSpec(
        restart_policy="Never",
        init_containers=[downloader_container],
        containers=[processor_container],
        volumes=[
            client.V1Volume(name="processor-input",
                            empty_dir=client.V1EmptyDirVolumeSource()),
            client.V1Volume(name="processor-output",
                            empty_dir=client.V1EmptyDirVolumeSource()),
        ])

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=job_name,
                                     labels={"app": "local-rebuild-processor"}),
        spec=pod_spec)

    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=0)

    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=job_name,
                                     labels={"app": "local-rebuild-processor"}),
        spec=spec)

    client.BatchV1Api().create_namespaced_job(body=job, namespace="default")
def create_job_object(container):
    template = kubernetes_client.V1PodTemplateSpec(
        metadata=kubernetes_client.V1ObjectMeta(labels={"app": "pi"}),
        spec=kubernetes_client.V1PodSpec(restart_policy="Never",
                                         containers=[container]))
    return kubernetes_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=kubernetes_client.V1ObjectMeta(name="pi"),
        spec=kubernetes_client.V1JobSpec(template=template, backoff_limit=4))
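# A hedged polling sketch (assumed helper, not in the original snippets): after
# a Job such as the one above has been created, its completion can be watched
# by reading the Job status until either `succeeded` or `failed` is set.
def wait_for_job(name="pi", namespace="default", poll_seconds=5):
    import time
    batch_api = kubernetes_client.BatchV1Api()
    while True:
        status = batch_api.read_namespaced_job_status(name, namespace).status
        if status.succeeded:
            return True
        if status.failed:
            return False
        time.sleep(poll_seconds)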
def calc(n):
    if n < 2:
        return 1
    else:
        config.load_incluster_config()
        core_api = client.CoreV1Api()
        batch_api = client.BatchV1Api()
        watch_api = watch.Watch()
        container = client.V1Container(name=name,
                                       image=image,
                                       args=["--calc", f"{n}"])
        template = client.V1PodTemplateSpec(
            spec=client.V1PodSpec(restart_policy="Never",
                                  service_account_name=service_account,
                                  containers=[container]))
        job = client.V1Job(
            metadata=client.V1ObjectMeta(namespace=namespace,
                                         generate_name=f"{name}-"),
            spec=client.V1JobSpec(template=template,
                                  ttl_seconds_after_finished=0))
        res = batch_api.create_namespaced_job(
            body=job,
            namespace=namespace,
        )
        job_name = res.metadata.name
        # Initialize pod_name so the check below cannot raise NameError if the
        # watch times out before any pod succeeds.
        pod_name = None
        for event in watch_api.stream(func=core_api.list_namespaced_pod,
                                      namespace=namespace,
                                      label_selector=f"job-name={job_name}",
                                      timeout_seconds=1200):
            if event["object"].status.phase == "Succeeded":
                pod_name = event["object"].metadata.name
                watch_api.stop()
                break
        if pod_name:
            log = core_api.read_namespaced_pod_log(
                name=pod_name,
                namespace=namespace,
            )
            val = int(log)
            batch_api.delete_namespaced_job(job_name,
                                            namespace,
                                            propagation_policy="Background")
            return val