def initCronJob(self, key: str, value: str, full_path: str) -> None:
    """Build a suspended placeholder CronJob manifest named after *value*.

    The manifest (one batch/v1beta1 CronJob with a single empty "job"
    container) is stored on ``self.manifest``; nothing is returned.
    """
    name = self.dnsify(value)
    metadata = K.V1ObjectMeta(name=name)
    assert self.manifest_list
    cluster_meta = self.manifest_list.clusterMeta()
    if cluster_meta:
        # Propagate cluster-wide annotations onto this object's metadata.
        metadata.annotations = cluster_meta.annotations
    worker = K.V1Container(
        name="job",
        resources=K.V1ResourceRequirements(limits={}, requests={}),
    )
    # Assemble the spec inside-out through named intermediates.
    pod_template = K.V1PodTemplateSpec(spec=K.V1PodSpec(containers=[worker]))
    job_template = K.V1beta1JobTemplateSpec(spec=K.V1JobSpec(template=pod_template))
    cron_spec = K.V1beta1CronJobSpec(
        schedule="* * * * *",
        suspend=True,  # created suspended; never fires until enabled
        job_template=job_template,
    )
    cron_job = K.V1beta1CronJob(
        metadata=metadata,
        kind="CronJob",
        api_version="batch/v1beta1",
        spec=cron_spec,
    )
    self.manifest = Manifest(
        data=[cron_job],
        pluginName="metronome",
        manifestName=name,
    )
def build_k8s_object(aggregate_job, k8s_api_version=None, defaults=None):
    """Build the Kubernetes object described by *aggregate_job*.

    Returns a ``V1Job`` when ``aggregate_job['schedule'] == 'once'``,
    otherwise a CronJob model matching *k8s_api_version*
    (``batch/v1beta1`` for 1.8+, ``batch/v2alpha1`` for 1.5-1.7).

    :param aggregate_job: mapping of job fields; values here win over
        *defaults*.  Not mutated by this function.
    :param k8s_api_version: '<major>.<minor>' string; falls back to the
        module-level _K8S_API_VERSION.
    :param defaults: optional mapping of fallback values; deep-copied, so
        the caller's dict is never mutated.
    :raises ValueError: for an unsupported or unparseable api version.
    """
    k8s_api_version = k8s_api_version or _K8S_API_VERSION
    version_parts = str(k8s_api_version).split('.')
    # Malformed versions ('1', '1.x') previously escaped as
    # IndexError/ValueError from int(); normalize to the same ValueError.
    try:
        k8s_major, k8s_minor = int(version_parts[0]), int(version_parts[1])
    except (IndexError, ValueError):
        raise ValueError('Unsupported kubernetes api version')
    if k8s_major != 1 or k8s_minor < 5:
        raise ValueError('Unsupported kubernetes api version')
    if k8s_minor >= 8:
        cronjob_api_version = 'batch/v1beta1'
        CronJob = k8s_models.V1beta1CronJob
        CronJobSpec = k8s_models.V1beta1CronJobSpec
        JobTemplateSpec = k8s_models.V1beta1JobTemplateSpec
    else:
        cronjob_api_version = 'batch/v2alpha1'
        CronJob = k8s_models.V2alpha1CronJob
        CronJobSpec = k8s_models.V2alpha1CronJobSpec
        JobTemplateSpec = k8s_models.V2alpha1JobTemplateSpec

    # Deep-copy so filling in fallback values never leaks into the caller.
    defaults = copy.deepcopy(defaults) if defaults is not None else {}
    if 'containerName' not in defaults:
        defaults['containerName'] = '{}-job'.format(aggregate_job['name'])
    if 'labels' not in defaults:
        defaults['labels'] = {}
    if 'labelKey' not in defaults:
        defaults['labelKey'] = 'kronjob/job'

    def _get_arg(key):
        # Job-level values win over defaults.
        return aggregate_job.get(key, defaults.get(key))

    def _get_args(*keys):
        """Collect several args keyed by their snake_case names.

        Roughly speaking, inflection.underscore turns camel case into
        snake case, e.g. 'DeviceType' -> 'device_type', but not always,
        e.g. 'IOError' -> 'IoError'.
        """
        return {inflection.underscore(key): _get_arg(key) for key in keys}

    # BUGFIX: copy before mutating — _get_arg may hand back the caller's
    # own aggregate_job['labels'] dict, which must not be changed in place.
    labels = dict(_get_arg('labels'))
    labels[_get_arg('labelKey')] = _get_arg('name')
    metadata = k8s_models.V1ObjectMeta(labels=labels,
                                       **_get_args('name', 'namespace'))
    # env is read from aggregate_job only — defaults intentionally ignored.
    env = _deserialize_k8s(aggregate_job.get('env'), 'list[V1EnvVar]')
    volume_mounts = [
        k8s_models.V1VolumeMount(
            **{inflection.underscore(k): v for k, v in volume_mount.items()})
        for volume_mount in (_get_arg('volumeMounts') or [])
    ]

    def _resources(cpu_key, memory_key):
        # Drop unset entries; collapse an empty dict to None so the k8s
        # client omits the field from the serialized object.
        resources = {
            k: v
            for k, v in (('cpu', _get_arg(cpu_key)),
                         ('memory', _get_arg(memory_key)))
            if v is not None
        }
        return resources or None

    job_spec = k8s_models.V1JobSpec(
        template=k8s_models.V1PodTemplateSpec(
            metadata=k8s_models.V1ObjectMeta(labels=labels,
                                             **_get_args('annotations')),
            spec=k8s_models.V1PodSpec(
                containers=[
                    k8s_models.V1Container(
                        env=env,
                        name=_get_arg('containerName'),
                        resources=k8s_models.V1ResourceRequirements(
                            limits=_resources('cpuLimit', 'memoryLimit'),
                            requests=_resources('cpuRequest', 'memoryRequest'),
                        ),
                        volume_mounts=volume_mounts,
                        **_get_args('args', 'command', 'image',
                                    'imagePullPolicy'))
                ],
                **_get_args('nodeSelector', 'restartPolicy', 'volumes'))),
        backoff_limit=_get_arg('backoffLimit'))

    # NOTE(review): this reads aggregate_job directly, so a schedule
    # supplied only through defaults raises KeyError here — confirm intended.
    if aggregate_job['schedule'] == 'once':
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Job.md
        k8s_object = k8s_models.V1Job(api_version='batch/v1',
                                      kind='Job',
                                      metadata=metadata,
                                      spec=job_spec)
    else:
        # Note that this can be one of two versions here:
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1beta1CronJob.md
        # or
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V2alpha1CronJob.md
        k8s_object = CronJob(
            api_version=cronjob_api_version,
            kind='CronJob',
            metadata=metadata,
            spec=CronJobSpec(
                job_template=JobTemplateSpec(
                    metadata=k8s_models.V1ObjectMeta(labels=labels),
                    spec=job_spec),
                **_get_args('concurrencyPolicy', 'failedJobsHistoryLimit',
                            'schedule', 'successfulJobsHistoryLimit',
                            'suspend', 'startingDeadlineSeconds')))
    return k8s_object
# CLI options (the optparse parser is created earlier in this script).
# NOTE(review): optparse registers -h/--help automatically, so adding a
# "-h" option here raises OptionConflictError unless the parser was built
# with add_help_option=False or conflict_handler='resolve' — confirm
# against the parser's construction (not visible in this chunk).
parser.add_option("-h", "--host", help="api host")
parser.add_option("-i", "--image", help="container image")
(options, args) = parser.parse_args()
#config.load_kube_config(config_file=options.kubeconfig)
# Authenticate with a bearer token/api key instead of a kubeconfig file.
client.configuration.api_key['authorization'] = options.apikey
client.configuration.host = options.host
# API handles: core for pods/services, batch for Jobs.
v1 = client.CoreV1Api()
v1Batch = client.BatchV1Api()
#define data structure
# Empty kubernetes model objects, populated field-by-field below.
job = models.V1Job()
job_meta = models.V1ObjectMeta()
job_spec = models.V1JobSpec()
pod_spec_template = models.V1PodTemplateSpec()
pod_spec = models.V1PodSpec()
pod_meta = models.V1ObjectMeta()
pod_spec_container = models.V1Container()
env_var = models.V1EnvVar()
#populate data structure
#father object (top-level Job)
job.api_version = "batch/v1"
job.kind = "Job"
#job metadata
job_meta.name = "p1"
job.metadata = job_meta
#containers spec 172.30.129.159:5000/testjob/worker
def __init__(self,
             image: Optional[str] = None,
             name: Optional[str] = None,
             namespace: Optional[str] = 'default',
             volume_mounts: Optional[List[Union[k8s.V1VolumeMount, dict]]] = None,
             envs: Optional[Dict[str, str]] = None,
             cmds: Optional[List[str]] = None,
             args: Optional[List[str]] = None,
             labels: Optional[Dict[str, str]] = None,
             node_selectors: Optional[Dict[str, str]] = None,
             ports: Optional[List[Union[k8s.V1ContainerPort, dict]]] = None,
             volumes: Optional[List[Union[k8s.V1Volume, dict]]] = None,
             image_pull_policy: Optional[str] = None,
             restart_policy: Optional[str] = "Never",
             image_pull_secrets: Optional[str] = None,
             init_containers: Optional[List[k8s.V1Container]] = None,
             service_account_name: Optional[str] = None,
             resources: Optional[Union[k8s.V1ResourceRequirements, dict]] = None,
             annotations: Optional[Dict[str, str]] = None,
             affinity: Optional[dict] = None,
             hostnetwork: bool = False,
             tolerations: Optional[list] = None,
             security_context: Optional[Union[k8s.V1PodSecurityContext, dict]] = None,
             configmaps: Optional[List[str]] = None,
             dnspolicy: Optional[str] = None,
             schedulername: Optional[str] = None,
             pod: Optional[k8s.V1Pod] = None,
             pod_template_file: Optional[str] = None,
             priority_class_name: Optional[str] = None):
    """Assemble the kubernetes model objects (Job, metadata, container,
    pod spec/template) for a batch/v1 Job from the given parameters.

    NOTE(review): many parameters (envs, node_selectors, image_pull_secrets,
    init_containers, service_account_name, affinity, hostnetwork, tolerations,
    security_context, configmaps, dnspolicy, schedulername, pod,
    pod_template_file, priority_class_name) are not referenced in this
    visible portion of the constructor — presumably they are consumed
    later in __init__ or by other methods; confirm against the full class.
    """
    self.name = name
    self.namespace = namespace
    # Jobs: top-level batch/v1 Job shell with an empty status.
    self.job = k8s.V1Job()
    self.job.api_version = "batch/v1"
    self.job.kind = "Job"
    self.job.status = k8s.V1JobStatus()
    # MetaData shared by the Job object.
    self.metadata = k8s.V1ObjectMeta()
    self.metadata.labels = labels
    self.metadata.name = name
    self.metadata.namespace = namespace
    self.metadata.annotations = annotations
    # Container: single worker container; list-valued fields default to
    # empty lists so later code can append without None checks.
    self.container = k8s.V1Container(name='base')
    self.container.image = image
    self.container.env = []
    self.container.command = cmds or []
    self.container.args = args or []
    self.container.image_pull_policy = image_pull_policy
    self.container.ports = ports or []
    self.container.resources = resources
    self.container.volume_mounts = volume_mounts or []
    # Spec - Job spec config is a bit weird because you have to add a new
    # template: the pod spec hangs off template.template.spec.
    self.spec = k8s.V1PodSpec(containers=[self.container],
                              restart_policy=restart_policy)
    self.template = k8s.V1PodTemplate()
    self.template.template = k8s.V1PodTemplateSpec()
    self.template.template.spec = self.spec
    # Volumes attached at the pod level (mounted via container.volume_mounts).
    self.template.template.spec.volumes = volumes
def inner_submit(
    self,
    cmdline: str,
    job_name: Optional[str] = None,
    additional_setup_lines: Optional[List[str]] = None,
    job_count: Optional[int] = None,
) -> Tuple[List["concurrent.futures.Future[str]"], List[Tuple[int, int]]]:
    """Starts a Kubernetes pod that runs the specified shell command line.

    Creates an Indexed batch/v1 Job with *job_count* completions (1 when
    job_count is None), mounting the current directory, the cfut dir and
    all configured mounts as hostPath volumes. Returns a single
    already-resolved future holding the generated job id, plus the
    (start, end) sub-job range.

    NOTE(review): additional_setup_lines is not used in this visible body
    — confirm whether it is intentionally ignored here.
    """
    kubernetes_client = KubernetesClient()
    self.ensure_kubernetes_namespace()
    job_id = str(uuid4())
    # The id is known immediately, so hand back a pre-resolved future.
    job_id_future: "concurrent.futures.Future[str]" = concurrent.futures.Future(
    )
    job_id_future.set_result(job_id)
    job_id_futures = [job_id_future]

    is_array_job = job_count is not None
    number_of_subjobs = job_count if job_count is not None else 1
    ranges = [(0, number_of_subjobs)]
    # Only memory/cpu/hugepages-* entries become container resource requests.
    requested_resources = {
        k: v
        for k, v in self.job_resources.items()
        if k in ("memory", "cpu") or k.startswith("hugepages-")
    }
    # Optional umask prefix prepended to the shell command.
    umaskline = (f"umask {self.job_resources['umask']}; "
                 if "umask" in self.job_resources else "")
    # Array jobs get one log per completion index (expanded by the shell
    # from $JOB_COMPLETION_INDEX inside the pod).
    log_path = (self.format_log_file_path(
        self.cfut_dir, f"{job_id}_$JOB_COMPLETION_INDEX")
                if is_array_job else self.format_log_file_path(
                    self.cfut_dir, job_id))
    mounts = deduplicate_mounts(
        [Path(mount) for mount in self.job_resources["mounts"]] +
        [Path.cwd(), Path(self.cfut_dir).absolute()])

    job_manifest = kubernetes_models.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=kubernetes_models.V1ObjectMeta(name=job_id),
        spec=kubernetes_models.V1JobSpec(
            completion_mode="Indexed",
            completions=number_of_subjobs,
            parallelism=number_of_subjobs,
            ttl_seconds_after_finished=604800,  # 7 days
            template=kubernetes_models.V1PodTemplateSpec(
                metadata=kubernetes_models.V1ObjectMeta(
                    annotations={
                        "cluster-tools.scalableminds.com/job-id": job_id,
                        "cluster-tools.scalableminds.com/job-is-array-job":
                        str(is_array_job),
                        "cluster-tools.scalableminds.com/job-name":
                        job_name if job_name is not None else "",
                    }),
                spec=kubernetes_models.V1PodSpec(
                    containers=[
                        kubernetes_models.V1Container(
                            image=self.job_resources["image"],
                            image_pull_policy="IfNotPresent",
                            working_dir=str(Path.cwd().absolute()),
                            command=["/bin/bash"],
                            name="worker",
                            # NOTE(review): the literal " 0" after cmdline
                            # looks like a positional argument passed to the
                            # command, and "2>&1" precedes the stdout
                            # redirection into tee, so stderr may bypass the
                            # log file — confirm both are intended.
                            args=[
                                "-c",
                                f"{umaskline}{cmdline} 0 2>&1 > >(tee -a {log_path})",
                            ],
                            # Forward the caller's environment, skipping PWD
                            # vars and names invalid as k8s env var names.
                            env=[
                                kubernetes_models.V1EnvVar(name=name,
                                                           value=value)
                                for name, value in os.environ.items()
                                if name not in ("PWD", "OLDPWD") and re.match(
                                    "^[-._a-zA-Z][-._a-zA-Z0-9]*$", name)
                                is not None
                            ] + [
                                kubernetes_models.V1EnvVar(name="JOB_ID",
                                                           value=job_id),
                                kubernetes_models.V1EnvVar(
                                    name="JOB_IS_ARRAY_JOB",
                                    value=str(is_array_job)),
                            ],
                            # Run as the submitting user so files written to
                            # the host mounts have the right ownership.
                            security_context=kubernetes_models.
                            V1SecurityContext(run_as_user=os.getuid(),
                                              run_as_group=os.getgid()),
                            resources=kubernetes_models.
                            V1ResourceRequirements(
                                requests=requested_resources),
                            volume_mounts=[
                                kubernetes_models.V1VolumeMount(
                                    name=volume_name_from_path(mount),
                                    mount_path=str(mount),
                                ) for mount in mounts
                            ],
                        )
                    ],
                    node_selector=self.job_resources.get("node_selector"),
                    restart_policy="Never",
                    # Each mount becomes a hostPath volume at the same path.
                    volumes=[
                        kubernetes_models.V1Volume(
                            name=volume_name_from_path(mount),
                            host_path=kubernetes_models.
                            V1HostPathVolumeSource(path=str(mount)),
                        ) for mount in mounts
                    ],
                ),
            ),
        ),
    )
    try:
        kubernetes_client.batch.create_namespaced_job(
            body=job_manifest, namespace=self.job_resources["namespace"])
    except Exception as e:
        # Surface the failure before re-raising so it appears in logs.
        print(e, type(e))
        raise e
    return job_id_futures, ranges