def _container_to_pod(self, labels):
    """Build the Kubernetes pod spec for this job.

    Assembles the main container (with its EBS volume mounts and resource
    constraints), the wait container, the artifacts sidecar, an optional
    docker-in-docker container, and the pod-level annotations/labels.

    :param labels: optional dict of labels to attach to the pod (may be None)
    :return: the generated pod spec as returned by PodSpec.get_spec()
    """
    # Generate the service environment once and reuse the value for the
    # AX_SERVICE_ENV annotation below. (Fix: the original called
    # _gen_service_env() twice — once here with the result discarded and
    # once again when adding the annotation.)
    service_env = self._gen_service_env()

    pod_spec = PodSpec(self.jobname)
    pod_spec.restart_policy = "Never"

    main_container = self._container_spec()

    # Mount each requested EBS volume into the main container.
    for vol_tag, vol in iteritems(self.service.template.inputs.volumes):
        # sanitize name for kubernetes
        vol_tag = string_to_dns_label(vol_tag)
        cvol = ContainerVolume(vol_tag, vol.mount_path)
        assert "resource_id" in vol.details and "filesystem" in vol.details, \
            "resource_id and filesystem are required fields in volume details"
        cvol.set_type("AWS_EBS", vol_tag, vol.details["resource_id"],
                      vol.details["filesystem"])
        main_container.add_volume(cvol)
        logger.info("Mounting volume {} {} in {}".format(
            vol_tag, vol.details, vol.mount_path))

    pod_spec.add_main_container(main_container)
    wait_container = self._generate_wait_container_spec()
    pod_spec.add_wait_container(wait_container)

    (cpu, mem, d_cpu, d_mem) = self._container_resources()
    main_container.add_resource_constraints("cpu_cores", cpu, limit=None)
    main_container.add_resource_constraints("mem_mib", mem, limit=mem)

    # handle artifacts
    self_sid = None
    if self.service.service_context:
        self_sid = self.service.service_context.service_instance_id

    # TODO: This function calls ax_artifact and needs to be rewritten. Ugly code.
    artifacts_container = pod_spec.enable_artifacts(
        self.software_info.image_namespace, self.software_info.image_version,
        self_sid, self.service.template.to_dict())
    artifacts_container.add_env("AX_JOB_NAME", value=self.jobname)

    if self.service.template.docker_spec:
        dind_c = pod_spec.enable_docker(
            self.service.template.docker_spec.graph_storage_size_mib)
        dind_c.add_volumes(pod_spec.get_artifact_vols())
        dind_c.add_resource_constraints("cpu_cores", d_cpu, limit=None)
        dind_c.add_resource_constraints("mem_mib", d_mem, limit=d_mem)

    # self_sid already holds the service instance id (fix: the original
    # recomputed the same value into a second variable, service_id).
    pod_spec.add_annotation("ax_serviceid", self_sid)
    pod_spec.add_annotation("ax_costid", json.dumps(self.service.costid))
    pod_spec.add_annotation("ax_resources", json.dumps(self._ax_resources))
    pod_spec.add_annotation("AX_SERVICE_ENV", service_env)

    for k in labels or []:
        pod_spec.add_label(k, labels[k])

    return pod_spec.get_spec()
def insert_defaults(conf):
    """Insert defaults that are required for Task processing.

    :param conf: input conf
    :return: output conf (same object, mutated in place)
    """
    template = conf["template"]
    current_name = template.get("name", None)
    if current_name is None:
        # No name supplied: fall back to the conventional default.
        template["name"] = "main"
    else:
        # Sanitize the caller-supplied name so it is a valid DNS label.
        template["name"] = string_to_dns_label(current_name)
    return conf
def generate_name(conf):
    """Generate a kubernetes job name from a service template.

    Ensures the generated name has some relationship to human readable job
    names while also being unique.

    :param conf: service template
    :return: job name string
    """
    if conf["template"].get("once", True):
        # Normal (run-once) jobs derive their name from the unique id.
        return string_to_dns_label(conf["id"])

    # name is fully specified by caller. This is currently only used by
    # workflow executor. No user jobs are expected to use this code path.
    # Workflow executor generates a unique name for the workflow so we
    # do not have to worry about generating one for it.
    caller_name = conf.get("name", None)
    if caller_name is None:
        raise ValueError(
            "name is a required field in service object for once=false."
        )
    return string_to_dns_label(caller_name)
def _get_main_container_vols(self):
    """Build the list of ContainerVolume objects declared as inputs of the
    main container template.

    :return: list of ContainerVolume (AWS_EBS-typed) for the main container
    """
    container_template = self.spec.template.get_main_container()
    volumes = []
    for raw_name, vol in iteritems(container_template.inputs.volumes):
        # sanitize the volume name for kubernetes
        dns_name = string_to_dns_label(raw_name)
        cvol = ContainerVolume(dns_name, vol.mount_path)
        assert "resource_id" in vol.details, "Volume resource-id absent in volume details"
        assert "filesystem" in vol.details, "Volume filesystem absent in volume details"
        cvol.set_type("AWS_EBS", dns_name,
                      vol.details["resource_id"], vol.details["filesystem"])
        logger.debug("Volume {} {} mounted at {}".format(
            dns_name, vol.details, vol.mount_path))
        volumes.append(cvol)
    return volumes
def _create_main_container_spec(self, container_template):
    """
    :type container_template: argo.template.v1.container.ContainerTemplate
    :rtype Container
    """
    logger.debug("Container template is {}".format(container_template))
    dns_name = string_to_dns_label(container_template.name)
    container_spec = Container(
        dns_name, container_template.image,
        pull_policy=container_template.image_pull_policy)
    container_spec.parse_probe_spec(container_template)

    # Necessary envs for handshake
    container_spec.add_env("AX_HANDSHAKE_VERSION", value=CUR_RECORD_VERSION)

    # Envs introduced to user (downward-API style field references)
    for env_name, field_path in (("AX_POD_NAME", "metadata.name"),
                                 ("AX_POD_IP", "status.podIP"),
                                 ("AX_POD_NAMESPACE", "metadata.namespace"),
                                 ("AX_NODE_NAME", "spec.nodeName")):
        container_spec.add_env(env_name, value_from=field_path)
    container_spec.add_env("AX_CLUSTER_META_URL_V1", value=CLUSTER_META_URL_V1)

    # envs from user spec
    for env in container_template.env:
        (cfg_ns, cfg_name, cfg_key) = env.get_config()
        if cfg_ns is None:
            container_spec.add_env(env.name, value=env.value)
        else:
            # Config-backed env var: materialize a secret and reference it.
            secret = SecretResource(cfg_ns, cfg_name, self.name, self.application)
            secret.create()
            self._resources.insert(secret)
            container_spec.add_env(
                env.name,
                value_from_secret=(secret.get_resource_name(), cfg_key))

    # Unix socket for applet
    applet_sock = ContainerVolume("applet", "/tmp/applatix.io/")
    applet_sock.set_type("HOSTPATH", "/var/run/")
    container_spec.add_volume(applet_sock)

    return container_spec