def __init__(self):
    """Init container that stages a static `true` binary into a shared emptyDir.

    The busybox image provides a statically linked /bin/true; copying it out
    makes it runnable from other containers that mount the same volume.
    """
    super(InitContainerSetup, self).__init__("axsetup", "busybox:1.27.2-musl")
    # Shared scratch volume; other containers mount the same emptyDir.
    copyout_vol = ContainerVolume("static-bin-share", "/copyout")
    copyout_vol.set_type("EMPTYDIR")
    self.add_volume(copyout_vol)
    # Copy busybox's /bin/true into the shared volume.
    self.command = ["/bin/cp", "-f", "/bin/true", "/copyout"]
class ArtifactsContainer(Container):
    """
    This container defines the volumes and environments needed for
    artifacts management.
    """

    # Scratch directory used by the artifacts container; also exported to
    # the container via the AX_ARTIFACTS_SCRATCH env var.
    ARTIFACTS_CONTAINER_SCRATCH = "/ax-artifacts-scratch"

    def __init__(self, containername, customer_image, namespace, version):
        """
        :param containername: kubernetes container name
        :param customer_image: user image name, exported as AX_CUSTOMER_IMAGE_NAME
        :param namespace: registry namespace of the artifacts image
        :param version: tag of the artifacts image
        """
        s = SoftwareInfo()
        super(ArtifactsContainer, self).__init__(
            containername, "{}/{}/artifacts:{}".format(s.registry, namespace, version))

        # artifacts scratch space
        self._artifacts_scratch = ContainerVolume(
            "artifacts-scratch", ArtifactsContainer.ARTIFACTS_CONTAINER_SCRATCH)
        self._artifacts_scratch.set_type("EMPTYDIR")
        self.add_volume(self._artifacts_scratch)

        # create a hostpath for docker-socket-dir. This is used for running docker inspect
        socket_hostpath = ContainerVolume("docker-socket-file", "/var/run/docker.sock")
        socket_hostpath.set_type("HOSTPATH", "/var/run/docker.sock")
        self.add_volume(socket_hostpath)

        # emptydir for sharing for copying static binaries from init container
        # so that they are available in the main container
        self._static_bins = ContainerVolume("static-bins", "/copyto")
        self._static_bins.set_type("EMPTYDIR")
        self.add_volume(self._static_bins)

        # add environment vars needed for artifacts
        self.add_env("AX_TARGET_CLOUD", value=Cloud().target_cloud())
        self.add_env("AX_CLUSTER_NAME_ID", value=AXClusterId().get_cluster_name_id())
        self.add_env("AX_CUSTOMER_ID", value=AXCustomerId().get_customer_id())
        self.add_env("AX_CUSTOMER_IMAGE_NAME", value=customer_image)
        self.add_env("AX_ARTIFACTS_SCRATCH", value=ArtifactsContainer.ARTIFACTS_CONTAINER_SCRATCH)
        self.add_env("AX_POD_NAME", value_from="metadata.name")
        self.add_env("AX_POD_IP", value_from="status.podIP")
        self.add_env("AX_POD_NAMESPACE", value_from="metadata.namespace")
        self.add_env("AX_NODE_NAME", value_from="spec.nodeName")
        # Consistency fix: pass via the `value=` keyword like every other
        # add_env call in this class, instead of positionally.
        self.add_env("ARGO_LOG_BUCKET_NAME", value=os.getenv("ARGO_LOG_BUCKET_NAME", ""))

        # downward-API volume exposing pod annotations to the container
        annotation_vol = ContainerVolume("annotations", "/etc/axspec")
        annotation_vol.set_type("DOWNWARDAPI", "metadata.annotations")
        self.add_volume(annotation_vol)

        # AA-3175: CPU and memory are set to lowest possible so that pod
        # requests are kept at a minimum
        self.add_resource_constraints("cpu_cores", 0.001)
        self.add_resource_constraints("mem_mib", 4)

    def get_artifacts_volume(self):
        """Return a deep copy of the artifacts scratch volume."""
        return copy.deepcopy(self._artifacts_scratch)

    def get_static_bins_volume(self):
        """Return a deep copy of the static-binaries share volume."""
        return copy.deepcopy(self._static_bins)
def _container_to_pod(self, labels):
    """Build the kubernetes pod spec for this job's container.

    Assembles the main container, wait container, artifacts container,
    optional docker-in-docker sidecar, annotations and labels.

    :param labels: optional dict of labels to add to the pod (may be None)
    :return: pod spec dict from PodSpec.get_spec()
    """
    # generate the service environment
    # NOTE(review): the return value is discarded here and the method is
    # called again below for the AX_SERVICE_ENV annotation — presumably
    # idempotent; confirm before deduplicating.
    self._gen_service_env()
    pod_spec = PodSpec(self.jobname)

    # User may have specified restart policy. This takes care of it.
    pod_spec.restart_policy = "Never"

    main_container = self._container_spec()

    # Mount every declared input volume as an AWS EBS-backed volume.
    for vol_tag, vol in iteritems(self.service.template.inputs.volumes):
        # sanitize name for kubernetes
        vol_tag = string_to_dns_label(vol_tag)
        cvol = ContainerVolume(vol_tag, vol.mount_path)
        assert "resource_id" in vol.details and "filesystem" in vol.details, \
            "resource_id and filesystem are required fields in volume details"
        cvol.set_type("AWS_EBS", vol_tag, vol.details["resource_id"], vol.details["filesystem"])
        main_container.add_volume(cvol)
        logger.info("Mounting volume {} {} in {}".format(vol_tag, vol.details, vol.mount_path))

    pod_spec.add_main_container(main_container)
    wait_container = self._generate_wait_container_spec()
    pod_spec.add_wait_container(wait_container)

    # Apply resource requests/limits: main memory is hard-limited,
    # CPU is request-only (limit=None).
    (cpu, mem, d_cpu, d_mem) = self._container_resources()
    main_container.add_resource_constraints("cpu_cores", cpu, limit=None)
    main_container.add_resource_constraints("mem_mib", mem, limit=mem)

    # handle artifacts
    self_sid = None
    if self.service.service_context:
        self_sid = self.service.service_context.service_instance_id

    # TODO: This function calls ax_artifact and needs to be rewritten. Ugly code.
    artifacts_container = pod_spec.enable_artifacts(
        self.software_info.image_namespace, self.software_info.image_version, self_sid,
        self.service.template.to_dict())
    artifacts_container.add_env("AX_JOB_NAME", value=self.jobname)

    # Optional docker-in-docker sidecar; shares the artifact volumes so the
    # daemon can see artifacts produced/consumed by the job.
    if self.service.template.docker_spec:
        dind_c = pod_spec.enable_docker(self.service.template.docker_spec.graph_storage_size_mib)
        dind_c.add_volumes(pod_spec.get_artifact_vols())
        dind_c.add_resource_constraints("cpu_cores", d_cpu, limit=None)
        dind_c.add_resource_constraints("mem_mib", d_mem, limit=d_mem)

    service_id = None
    if self.service.service_context:
        service_id = self.service.service_context.service_instance_id
    pod_spec.add_annotation("ax_serviceid", service_id)
    pod_spec.add_annotation("ax_costid", json.dumps(self.service.costid))
    pod_spec.add_annotation("ax_resources", json.dumps(self._ax_resources))
    pod_spec.add_annotation("AX_SERVICE_ENV", self._gen_service_env())

    for k in labels or []:
        pod_spec.add_label(k, labels[k])

    return pod_spec.get_spec()
def __init__(self, customer_image):
    """Init container built from the customer image that runs a shared static `true`.

    Mounts the static-bin-share emptyDir at /staticbin and executes the
    statically linked binary from it.
    """
    super(InitContainerPullImage, self).__init__(INIT_CONTAINER_NAME_PULLIMAGE, customer_image)
    bin_share = ContainerVolume("static-bin-share", "/staticbin")
    bin_share.set_type("EMPTYDIR")
    self.add_volume(bin_share)
    # Execute the static binary staged into the shared volume.
    self.command = ["/staticbin/true"]
    # AA-3175: CPU and memory are set to lowest possible so that pod requests are kept at a minimum
    self.add_resource_constraints("cpu_cores", 0.001)
    self.add_resource_constraints("mem_mib", 4)
def __init__(self, customer_image, namespace, version):
    """Sidecar container that handles log management for the pod."""
    super(SidecarTask, self).__init__(SIDEKICK_WAIT_CONTAINER_NAME, customer_image, namespace, version)

    # The sidecar manages container logs: mount the docker container log
    # directory from the host at /logs and tell the sidecar where it is.
    container_logs = ContainerVolume("containerlogs", "/logs")
    container_logs.set_type("HOSTPATH", "/var/lib/docker/containers")
    self.add_volume(container_logs)
    self.add_env("LOGMOUNT_PATH", "/logs")
    self.add_env("AX_CLUSTER_NAME_ID", os.getenv("AX_CLUSTER_NAME_ID"))

    # set the arguments
    self.args = ["post"]
def generate_volumes_for_artifacts():
    """Build paired emptyDir volume lists for input artifacts.

    NOTE: reads `sid`, `in_artifacts_spec` and ARTIFACTS_CONTAINER_SCRATCH_PATH
    from the enclosing scope — presumably a nested helper; confirm at call site.

    :return: tuple (ret_vols, initc_vols) where ret_vols are the volumes for
        the main container (at each artifact's mount path) and initc_vols are
        the same volumes mounted at the init-container staging paths.
    """
    # Direct boolean assignment instead of a flag mutated by an `if`.
    test_mode = AXArtifacts.is_test_service_instance(sid)
    art_volumes = AXArtifacts.get_extra_artifact_in_volume_mapping(
        in_artifacts_spec, ARTIFACTS_CONTAINER_SCRATCH_PATH, "in",
        test_mode=test_mode, self_sid=sid)

    ret_vols = []
    initc_vols = []
    already_mapped = set()  # a set, not a dict used as a set
    # enumerate preserves the original numbering: the index advances for
    # every entry, even when a duplicate mount path is skipped.
    for i, (initc_path, mount_path) in enumerate(art_volumes or []):
        name = "ax-art-{}".format(i)
        main_vol = ContainerVolume(name, mount_path)
        main_vol.set_type("EMPTYDIR")
        init_vol = ContainerVolume(name, initc_path)
        init_vol.set_type("EMPTYDIR")
        # Only one volume per distinct mount path.
        if mount_path not in already_mapped:
            ret_vols.append(main_vol)
            initc_vols.append(init_vol)
            already_mapped.add(mount_path)
    return ret_vols, initc_vols
def _get_main_container_vols(self):
    """Collect the AWS EBS volumes declared on the main container template.

    :return: list of ContainerVolume objects, one per declared input volume
    """
    container_template = self.spec.template.get_main_container()
    ebs_vols = []
    for raw_name, vol in iteritems(container_template.inputs.volumes):
        # sanitize the volume name for kubernetes
        dns_name = string_to_dns_label(raw_name)
        assert "resource_id" in vol.details, "Volume resource-id absent in volume details"
        assert "filesystem" in vol.details, "Volume filesystem absent in volume details"
        ebs_vol = ContainerVolume(dns_name, vol.mount_path)
        ebs_vol.set_type("AWS_EBS", dns_name, vol.details["resource_id"], vol.details["filesystem"])
        logger.debug("Volume {} {} mounted at {}".format(dns_name, vol.details, vol.mount_path))
        ebs_vols.append(ebs_vol)
    return ebs_vols
def _create_main_container_spec(self, container_template):
    """
    :type container_template: argo.template.v1.container.ContainerTemplate
    :rtype Container
    """
    logger.debug("Container template is {}".format(container_template))
    dns_name = string_to_dns_label(container_template.name)
    main_spec = Container(
        dns_name, container_template.image,
        pull_policy=container_template.image_pull_policy)
    main_spec.parse_probe_spec(container_template)

    # Necessary envs for handshake
    main_spec.add_env("AX_HANDSHAKE_VERSION", value=CUR_RECORD_VERSION)

    # Envs introduced to user (downward-API sourced)
    for env_name, field_path in (("AX_POD_NAME", "metadata.name"),
                                 ("AX_POD_IP", "status.podIP"),
                                 ("AX_POD_NAMESPACE", "metadata.namespace"),
                                 ("AX_NODE_NAME", "spec.nodeName")):
        main_spec.add_env(env_name, value_from=field_path)
    main_spec.add_env("AX_CLUSTER_META_URL_V1", value=CLUSTER_META_URL_V1)

    # envs from user spec; config-backed entries are materialized as secrets
    for env in container_template.env:
        (cfg_ns, cfg_name, cfg_key) = env.get_config()
        if cfg_ns is None:
            main_spec.add_env(env.name, value=env.value)
            continue
        secret = SecretResource(cfg_ns, cfg_name, self.name, self.application)
        secret.create()
        self._resources.insert(secret)
        main_spec.add_env(env.name,
                          value_from_secret=(secret.get_resource_name(), cfg_key))

    # Unix socket for applet
    applet_sock = ContainerVolume("applet", "/tmp/applatix.io/")
    applet_sock.set_type("HOSTPATH", "/var/run/")
    main_spec.add_volume(applet_sock)

    return main_spec
def add_configs_as_vols(self, configs, step_name, step_ns):
    """
    Some configs will be passed as secrets and these need to be loaded into
    the init container so that init container can convert the params in
    command and args to the necessary secret.

    :param configs: A list of tuples of (config_namespace, config_name)
    :param step_name: name of the step the secrets belong to
    :param step_ns: namespace of the step
    :return: list of the SecretResource objects that were created
    """
    created_secrets = []
    for (cfg_ns, cfg_name) in configs or []:
        secret = SecretResource(cfg_ns, cfg_name, step_name, step_ns)
        secret.create()
        # Mount each secret under /ax_secrets/<namespace>/<name>.
        secret_vol = ContainerVolume(
            secret.get_resource_name(),
            "/ax_secrets/{}/{}".format(cfg_ns, cfg_name))
        secret_vol.set_type("SECRET", secret.get_resource_name())
        self.add_volume(secret_vol)
        created_secrets.append(secret)
    return created_secrets
def __init__(self, size_in_mb):
    """Docker-in-docker daemon sidecar.

    :param size_in_mb: docker graph storage size (used on AWS only)
    """
    super(SidecarDockerDaemon, self).__init__(DIND_CONTAINER_NAME, "argoproj/dind:1.12.6")

    # Expose host kernel modules so dind can load the aufs module.
    modules_vol = ContainerVolume("kernel-lib-module", "/lib/modules")
    modules_vol.set_type("HOSTPATH", "/lib/modules")
    self.add_volume(modules_vol)

    # Per-node docker graph storage; the backing store differs by cloud.
    graph_vol = ContainerVolume("docker-graph-storage", "/var/lib/docker")
    if Cloud().target_cloud_aws():
        graph_vol.set_type("DOCKERGRAPHSTORAGE", size_in_mb)
    elif Cloud().target_cloud_gcp():
        graph_vol.set_type("EMPTYDIR")
    self.add_volume(graph_vol)

    # dind daemon needs to be privileged!
    self.privileged = True
def __init__(self, customer_image):
    """Init container built from the customer image that runs a no-op command.

    NOTE(review): super() targets InitContainerPullImage, but another
    constructor elsewhere in this file uses the same super target with a
    different body — confirm this method really belongs to that class.
    """
    super(InitContainerPullImage, self).__init__(INIT_CONTAINER_NAME_PULLIMAGE, customer_image)
    # hostPath source differs per cloud; presumably mounting a non-binary
    # path so the command exits immediately — TODO confirm intent.
    nothing_vol = ContainerVolume("bin-nothing", "/bin/nothing")
    if Cloud().in_cloud_aws():
        nothing_vol.set_type("HOSTPATH", "/bin/nothing")
    elif Cloud().in_cloud_gcp():
        nothing_vol.set_type("HOSTPATH", "/etc/nothing")
    self.add_volume(nothing_vol)
    self.command = ["/bin/nothing"]
    # AA-3175: CPU and memory are set to lowest possible so that pod requests are kept at a minimum
    self.add_resource_constraints("cpu_cores", 0.001)
    self.add_resource_constraints("mem_mib", 4)
def _create_deployment_spec(self):
    """Build the kubernetes Deployment spec for this application deployment.

    Assembles the pod (main container, volumes, resources, artifacts
    container, optional dind sidecar), annotations and labels, then wraps
    the pod spec in a deployment object.

    :return: kubernetes deployment spec (whatever
        _generate_deployment_spec_for_pod returns)
    :raises ValueError: if both executor_spec and docker_spec are set
    """
    pod_spec = PodSpec(self.name, namespace=self.application)
    main_container = self.spec.template.get_main_container()
    main_container_spec = self._create_main_container_spec(main_container)
    pod_spec.add_main_container(main_container_spec)

    container_vols = self._get_main_container_vols()
    main_container_spec.add_volumes(container_vols)

    # Requests only, no limits (limit=None), for both CPU and memory.
    hw_res = main_container.get_resources()
    main_container_spec.add_resource_constraints("cpu_cores", hw_res.cpu_cores, limit=None)
    main_container_spec.add_resource_constraints("mem_mib", hw_res.mem_mib, limit=None)

    artifacts_container = pod_spec.enable_artifacts(
        self._software_info.image_namespace, self._software_info.image_version, None,
        main_container.to_dict())

    # Set up special circumstances based on annotations
    # Check if we need to circumvent the executor script. This is needed for containers that run
    # special init processes such as systemd as these processes like to be pid 1
    if main_container.executor_spec:
        main_container_spec.command = None
        if main_container.docker_spec is not None:
            raise ValueError(
                "We do not support ax_ea_docker_enable with ax_ea_executor"
            )

    # Does this container need to be privileged
    main_container_spec.privileged = main_container.privileged

    # Check if docker daemon sidecar needs to be added
    if main_container.docker_spec:
        # graph storage size is specified in GiB
        dind_container_spec = pod_spec.enable_docker(
            main_container.docker_spec.graph_storage_size_mib)
        dind_container_spec.add_volumes(pod_spec.get_artifact_vols())
        dind_container_spec.add_resource_constraints(
            "cpu_cores", main_container.docker_spec.cpu_cores, limit=None)
        dind_container_spec.add_resource_constraints(
            "mem_mib", main_container.docker_spec.mem_mib, limit=None)
        dind_container_spec.add_volumes(container_vols)

    # Do we only need docker graph storage volume for the main container
    if main_container.graph_storage:
        dgs_vol = ContainerVolume("graph-storage-vol-only",
                                  main_container.graph_storage.mount_path)
        dgs_vol.set_type(
            "DOCKERGRAPHSTORAGE", main_container.graph_storage.graph_storage_size_mib)
        main_container_spec.add_volume(dgs_vol)

    # set the pod hostname to value provided in main container spec
    pod_spec.hostname = main_container.hostname

    # TODO: This needs fixup. job name is used in init container to ask permission to start
    # TODO: Don't know if this is needed in deployment or not?
    artifacts_container.add_env("AX_JOB_NAME", value=self.application)
    artifacts_container.add_env("AX_DEPLOYMENT_NEW", value="True")

    # Re-mount the EBS volumes into the artifacts container under /ax/fix
    # and export the remapped paths so they can be chmod-ed there.
    if len(container_vols) > 0:
        tmp_container_vols = copy.deepcopy(container_vols)
        volume_paths = []
        for v in tmp_container_vols:
            v.set_mount_path("/ax/fix" + v.volmount.mount_path)
            volume_paths.append(v.volmount.mount_path)
        artifacts_container.add_volumes(tmp_container_vols)
        logger.info("Volumes to chmod: %s", volume_paths)
        artifacts_container.add_env("AX_VOL_MOUNT_PATHS", value=str(volume_paths))

    # add annotation for service env which will show up in artifacts container
    pod_spec.add_annotation("AX_SERVICE_ENV", self._generate_service_env(self.spec.template))
    pod_spec.add_annotation("AX_IDENTIFIERS", self._get_identifiers())
    if self.spec.costid:
        pod_spec.add_annotation("ax_costid", json.dumps(self.spec.costid))

    pod_spec.add_label("deployment", self.name)
    pod_spec.add_label("application", self.application)
    pod_spec.add_label("tier", "user")
    pod_spec.add_label("deployment_id", self.spec.id)

    # now that pod is ready get its spec and wrap it in a deployment
    k8s_spec = self._generate_deployment_spec_for_pod(pod_spec.get_spec())
    logger.info("Generated Kubernetes spec for deployment %s", self.name)
    return k8s_spec