def find_dockerfiles(repository_name, match=True):
    """Locate ``Dockerfile.j2`` templates under a repository checkout.

    Walks ``CONF.repositories.path/<repository_name>`` and builds one
    metadata entry per directory that contains a ``Dockerfile.j2``.

    :param repository_name: name of the repository sub-directory to scan
    :param match: initial value stored in each entry's ``match`` flag
    :returns: dict mapping image name to its metadata dict; may be empty
              only when ``CONF.repositories.skip_empty`` is set, otherwise
              an empty scan logs an error and exits the process
    """
    dockerfiles = {}
    repository_dir = os.path.join(CONF.repositories.path, repository_name)
    for root, __, files in os.walk(repository_dir):
        # Guard clause: only directories that actually ship a template
        # contribute an image entry.
        if 'Dockerfile.j2' not in files:
            continue
        path = os.path.join(root, 'Dockerfile.j2')
        # The image name is the directory that holds the template.
        name = os.path.basename(os.path.dirname(path))
        spec = images.image_spec(name, add_address=CONF.builder.push)
        dockerfiles[name] = {
            'name': name,
            'full_name': spec,
            'path': path,
            'parent': None,
            'children': [],
            'match': match,
            'build_result': None,
            'push_result': None,
            'content': None,
            'sources': None,
        }
    if not dockerfiles:
        msg = 'No dockerfile for %s found'
        if CONF.repositories.skip_empty:
            LOG.debug(msg, repository_name)
        else:
            LOG.error(msg, repository_name)
            sys.exit(1)
    return dockerfiles
def serialize_daemon_container_spec(container):
    """Serialize a daemon container definition into a k8s container spec.

    Adds a ``CM_VERSION`` env var, an exec-based readiness probe, an
    optional liveness probe, and a privileged security context flag.
    """
    name = container["name"]
    # Base env plus the config-map version marker used for rolling updates.
    env = serialize_env_variables(container)
    env.append({"name": "CM_VERSION", "value": container['cm_version']})
    spec = {
        "name": name,
        "image": images.image_spec(container["image"]),
        "command": _get_start_cmd(name),
        "volumeMounts": serialize_volume_mounts(container),
        "readinessProbe": {
            "exec": {"command": _get_readiness_cmd(name)},
            "timeoutSeconds": 1,
        },
        "env": env,
    }
    # Liveness probe is opt-in via the service definition.
    liveness = container.get("probes", {}).get("liveness", {})
    if liveness:
        spec.update(serialize_liveness_probe(liveness))
    spec["securityContext"] = {
        "privileged": container.get("privileged", False)
    }
    return spec
def serialize_daemon_container_spec(container):
    """Build the k8s container spec dict for a daemon container.

    Includes image pull policy, readiness probe, ``CM_VERSION`` env var,
    optional liveness probe, privileged flag, and an optional lifecycle
    hook passed through verbatim from the definition.
    """
    name = container["name"]
    readiness = {
        "exec": {"command": _get_readiness_cmd(name)},
        "timeoutSeconds": 1,
    }
    cont_spec = {
        "name": name,
        "image": images.image_spec(container["image"]),
        "imagePullPolicy": CONF.kubernetes.image_pull_policy,
        "command": get_start_cmd(name),
        "volumeMounts": serialize_volume_mounts(container),
        "readinessProbe": readiness,
        "env": serialize_env_variables(container),
    }
    # Version marker so pods restart when their config-map changes.
    cont_spec["env"].append(
        {"name": "CM_VERSION", "value": container["cm_version"]})
    liveness = container.get("probes", {}).get("liveness", {})
    if liveness:
        cont_spec.update(serialize_liveness_probe(liveness))
    cont_spec["securityContext"] = {
        "privileged": container.get("privileged", False)
    }
    # Lifecycle hooks (postStart/preStop) are forwarded untouched.
    lifecycle = container.get("lifecycle", None)
    if lifecycle:
        cont_spec["lifecycle"] = lifecycle
    return cont_spec
def serialize_job_container_spec(container, job):
    """Build a k8s container spec for a one-shot job container.

    The container is named after the job, not after the container
    definition it borrows image/mounts/env from.
    """
    job_name = job["name"]
    return {
        "name": job_name,
        "image": images.image_spec(container["image"]),
        "command": _get_start_cmd(job_name),
        "volumeMounts": serialize_volume_mounts(container),
        "env": serialize_env_variables(container),
    }
def serialize_job_container_spec(container, job):
    """Build a k8s container spec for a job container.

    The job may override the container's image via its own ``image``
    key; volume mounts are serialized with the job in scope.
    """
    job_name = job["name"]
    # Job-level image takes precedence over the container's default.
    image_name = job.get('image') or container["image"]
    return {
        "name": job_name,
        "image": images.image_spec(image_name),
        "imagePullPolicy": CONF.kubernetes.image_pull_policy,
        "command": get_start_cmd(job_name),
        "volumeMounts": serialize_volume_mounts(container, job),
        "env": serialize_env_variables(container),
    }
def _create_action(self):
    """Render this action's pod spec and submit it to Kubernetes.

    Builds a single-container pod spec backed by two config-map volumes
    (rendered config + start script), then creates either a pod or a job
    depending on the configured restart policy.

    :raises ValueError: when the restart policy is not one of the two
        supported values
    """
    container = {
        "name": self.k8s_name,
        "image": config_images.image_spec(self.image),
        "imagePullPolicy": CONF.kubernetes.image_pull_policy,
        "command": templates.get_start_cmd(self.name),
        "volumeMounts": [
            {"name": "config-volume", "mountPath": "/etc/ccp"},
            {"name": "start-script",
             "mountPath": "/opt/ccp_start_script/bin"},
        ],
        "env": templates.serialize_env_variables({}),
        "restartPolicy": "Never",
    }
    # Items projected from the action's config-map into /etc/ccp.
    config_items = [
        {"key": "config", "path": "globals/globals.json"},
        {"key": "nodes-config", "path": "nodes-config/nodes-config.json"},
        {"key": "workflow", "path": "role/%s.json" % self.name},
    ]
    config_items.extend(
        {"key": f["content"], "path": "files/%s" % f["content"]}
        for f in self.files)
    volumes = [
        {"name": "config-volume",
         "configMap": {"name": self.k8s_name, "items": config_items}},
        {"name": "start-script",
         "configMap": {
             "name": templates.SCRIPT_CONFIG,
             "items": [{"key": templates.SCRIPT_CONFIG,
                        "path": "start_script.py"}],
         }},
    ]
    pod_spec = {
        "metadata": {
            "name": self.k8s_name,
            "labels": {
                "app": self.name,
                "ccp": "true",
                "ccp-action": "true",
                "ccp-component": self.component,
            },
        },
        "spec": {
            "containers": [container],
            "restartPolicy": "Never",
            "volumes": volumes,
        },
    }
    if self.restart_policy == RESTART_POLICY_NEVER:
        self._create_pod(pod_spec)
    elif self.restart_policy == RESTART_POLICY_ALWAYS:
        self._create_job(pod_spec)
    else:
        raise ValueError("Restart policy %s is not supported"
                         % (self.restart_policy))
def image_spec(image_name):
    """Template helper resolving the image spec for a ``FROM`` line.

    May be invoked at most once per template rendering.

    NOTE(review): ``parent`` is a free name from the enclosing scope —
    presumably a list shared with the renderer to record the single
    parent image; confirm against the surrounding code.

    :raises RuntimeError: on a second invocation within one rendering
    """
    if not parent:
        parent.append(image_name)
        return images.image_spec(image_name, add_address=CONF.builder.push)
    raise RuntimeError('You can use image_spec only once in FROM line')
def _create_action(self):
    """Render this action's pod spec and submit it to Kubernetes.

    Like the daemon path, but one-shot: a single-container pod backed by
    config-map volumes (globals, global secrets, nodes config, workflow
    role, attached files, and repository exports). The restart policy
    selects pod vs. job creation.

    :raises ValueError: for an unsupported restart policy
    """
    cont_spec = {
        "name": self.k8s_name,
        "image": config_images.image_spec(self.image),
        "imagePullPolicy": CONF.kubernetes.image_pull_policy,
        "command": templates.get_start_cmd(self.name),
        "volumeMounts": [
            {"name": "config-volume", "mountPath": "/etc/ccp"},
            {"name": "start-script",
             "mountPath": "/opt/ccp_start_script/bin"},
        ],
        "env": templates.serialize_env_variables({}),
        "restartPolicy": "Never",
    }
    # Static config-map projections, then per-action files and exports.
    config_items = [
        {"key": "config", "path": "globals/globals.json"},
        {"key": "secret-config",
         "path": "global-secrets/global-secrets.json"},
        {"key": "nodes-config", "path": "nodes-config/nodes-config.json"},
        {"key": "workflow", "path": "role/%s.json" % self.name},
    ]
    for attached in self.files:
        config_items.append({
            "key": attached["content"],
            "path": "files/%s" % attached["content"],
        })
    for export in utils.get_repositories_exports().values():
        config_items.append({
            "key": export["name"],
            "path": "exports/%s" % export["name"],
        })
    script_volume = {
        "name": "start-script",
        "configMap": {
            "name": templates.SCRIPT_CONFIG,
            "items": [{"key": templates.SCRIPT_CONFIG,
                       "path": "start_script.py"}],
        },
    }
    config_volume = {
        "name": "config-volume",
        "configMap": {"name": self.k8s_name, "items": config_items},
    }
    pod_spec = {
        "metadata": {
            "name": self.k8s_name,
            "labels": {
                "app": self.name,
                "ccp": "true",
                "ccp-action": "true",
                "ccp-component": self.component,
            },
        },
        "spec": {
            "containers": [cont_spec],
            "restartPolicy": "Never",
            "volumes": [config_volume, script_volume],
        },
    }
    if self.restart_policy == RESTART_POLICY_NEVER:
        self._create_pod(pod_spec)
    elif self.restart_policy == RESTART_POLICY_ALWAYS:
        self._create_job(pod_spec)
    else:
        raise ValueError("Restart policy %s is not supported" % (
            self.restart_policy))