def _delete_kube_config(self, ctx: Context, config: Path, dry_run: bool):
    """
    Delete a single Kubernetes config file from the cluster.

    :param Context ctx:
    :param Path config: Path to the yaml config to delete.
    :param bool dry_run: If True, only log what would be done.
    """
    if dry_run:
        logger.info(f"[DRY RUN] Deleting {config}")
        return

    logger.info(f"Deleting {config}")
    run(["kubectl", "delete", "-f", config])
def release_env(ctx: Context, env, dry_run=False):
    """
    Apply the sealed secrets of an environment and remove obsolete ones.

    :param Context ctx:
    :param str env: Name of the environment (directory under envs/).
    :param bool dry_run: If True, only log what would be done.
    """
    env_path = Path("envs") / env

    # Unsealed secrets must never be applied directly to the cluster.
    secrets = sorted(
        secret_file
        for secret_file in (env_path / "secrets").glob("*.yaml")
        if not secret_file.name.endswith(UNSEALED_SECRETS_EXTENSION)
    )
    for secret in secrets:
        # Sealed Secrets can't be validated like this
        # ctx.run(f"kubeval {secret}")
        if dry_run:
            logger.info(f"[DRY RUN] Applying {secret}")
            continue
        logger.info(f"Applying {secret}")
        run(["kubectl", "apply", "-f", secret])

    old_secrets = (env_path / "secrets" / "obsolete").glob("*.yaml")
    for secret in sorted(old_secrets, reverse=True):
        if dry_run:
            logger.info(f"[DRY RUN] Deleting {secret}")
            continue
        logger.info(f"Deleting {secret}")
        run(["kubectl", "delete", "-f", secret])
def kubeval(keep_configs=False):
    """
    Check that all Kubernetes configs look valid with kubeval

    :param bool keep_configs: If True, leave the temporary merged configs
                              on disk for inspection.
    """
    label(logger.info, "Checking Kubernetes configs")

    def _should_ignore(path):
        # Skip files living under the temporary merge directory itself.
        return TMP in path.parents

    merge_tmp = TMP / f"kubeval-{generate_random_id()}"
    kube_yamls = []
    for path in Path(".").glob("**/kube/*.yaml"):
        if _should_ignore(path):
            continue
        kube_yamls.append(str(get_merged_kube_file(path, merge_tmp)))

    skip_kinds = ",".join(KUBEVAL_SKIP_KINDS)
    run(["kubeval", "--strict", "--skip-kinds", skip_kinds] + kube_yamls)

    if merge_tmp.exists():
        if keep_configs:
            logger.info(f"Keeping temporary kube merges in {merge_tmp}")
        else:
            logger.info(f"Removing temporary kube merges from {merge_tmp}")
            rmtree(merge_tmp)
def _release_kube_config(self, ctx: Context, config: Path, dry_run: bool):
    """
    Apply a single Kubernetes config file to the cluster.

    :param Context ctx:
    :param Path config: Path to the yaml config to apply.
    :param bool dry_run: If True, only log what would be done.
    """
    if dry_run:
        logger.info(f"[DRY RUN] Applying {config}")
        return

    logger.info(f"Applying {config}")
    run(["kubectl", "apply", "-f", config])
def init_hooks(ctx):
    """
    Initialize version control hooks

    :param Context ctx:
    """
    label(logger.info, "Installing pre-commit hooks")
    run(["pre-commit", "install"])
def build(self, ctx: Context, dry_run=False):
    """
    Build the Docker image for this component, if it has a Dockerfile.

    :param Context ctx:
    :param bool dry_run: If True, only log what would be done.
    """
    label(logger.info, f"Building {self.path}")

    dockerfile = self.path / "Dockerfile"
    if not dockerfile.exists():
        logger.info(f"No Dockerfile for {self.name} component")
        return

    if dry_run:
        logger.info(f"[DRY RUN] Building {self.name} Docker image")
        return

    logger.info(f"Building {self.name} Docker image")
    tag = self._get_full_docker_name()
    run(["docker", "build", self.path, "-t", tag], stream=True)
def _restart_resource(
    self, ctx: Context, resource: str, dry_run: bool, no_rollout_wait: bool
):
    """
    Trigger a rolling restart of a Kubernetes resource.

    :param Context ctx:
    :param str resource: Resource reference, e.g. "deployment/app".
    :param bool dry_run: If True, only log what would be done.
    :param bool no_rollout_wait: If True, do not wait for the rollout
                                 to complete.
    """
    if dry_run:
        logger.info(f"[DRY RUN] Restarting resource {resource}")
        return

    logger.info(f"Restarting resource {resource}")
    run(["kubectl", "-n", self.namespace, "rollout", "restart", resource])

    if no_rollout_wait:
        return
    run(
        ["kubectl", "-n", self.namespace, "rollout", "status", resource],
        timeout=ROLLOUT_TIMEOUT,
    )
def get_master_key(env: str) -> None:
    """
    Get the master key for SealedSecrets for the given env.

    :param str env: The environment
    """
    settings = load_env_settings(env)
    ensure_context(settings.KUBE_CONTEXT)
    label(logger.info, f"Getting master key for {env}")

    # Based on:
    # https://github.com/bitnami-labs/sealed-secrets#how-can-i-do-a-backup-of-my-sealedsecrets
    result = run([
        "kubectl", "get", "secret",
        "-n", "kube-system",
        "-l", "sealedsecrets.bitnami.com/sealed-secrets-key",
        "-o", "yaml",
    ])
    content = result.stdout.decode(encoding="utf-8")

    output_file = master_key_path(env=env)
    logger.info(f"Saving master key to {output_file}")
    output_file.write_text(content, encoding="utf-8")
def kube_unseal(content: str, master_key: Path, cert: Path) -> str:
    """
    Decrypt given content using kubeseal.

    :param str content: The content of the "SealedSecrets" yaml file.
    :param Path master_key: The private key to use for decryption.
    :param Path cert: Certificate / public key file to use for encryption.
    :return str: The content of a Kubernetes "Secrets" yaml file.
    """
    command = [
        "kubeseal",
        "--recovery-unseal",
        "--recovery-private-key", master_key,
        "-o", "yaml",
        # Add the --cert flag to allow this to run also without a
        # ~/.kube/config file, for example in Travis.
        # For more details please see:
        # https://github.com/bitnami-labs/sealed-secrets/issues/341
        "--cert", cert,
    ]
    result = run(command, input=content.encode(encoding="utf-8"))
    return result.stdout.decode(encoding="utf-8")
def _try_post_release(
    self, ctx: Context, resource: str, selector: str, dry_run: bool
):
    """
    Run post-release.sh inside one pod of the given resource.

    Picks a random pod (among those running the image we released) and
    executes the script there; the exec itself is best-effort
    (check=False).

    :param Context ctx:
    :param str resource: Resource name, used for logging and errors.
    :param str selector: Label selector used to list candidate pods.
    :param bool dry_run: If True, only log what would be done.
    """
    if dry_run:
        logger.info(f"[DRY RUN] Running post-release.sh for {resource}")
        return

    result = run([
        "kubectl", "-n", self.namespace,
        "get", "pods",
        "-l", selector,
        "-o", "json",
    ])

    image = self._get_full_docker_name()
    # One entry per matching container, so a pod running the image in
    # several containers is weighted accordingly (matches prior behavior).
    candidates = [
        pod["metadata"]["name"]
        for pod in json.loads(result.stdout)["items"]
        for container in pod["spec"]["containers"]
        if container["image"] == image
    ]
    if not candidates:
        raise Exception(f"No running pods with correct image found for {resource}")

    target = random.choice(candidates)  # nosec
    run(
        [
            "kubectl", "-n", self.namespace,
            "exec", "-it", target,
            "sh", "post-release.sh",
        ],
        check=False,
    )
def cleanup_acr_repository(ctx, registry, repository):
    """
    Clean up a single repository in Azure Container Registry.

    Keeps the MAX_TAGS most recent tags (by build datetime embedded in the
    tag name) and deletes all older ones.

    :param Context ctx:
    :param str registry:
    :param str repository:
    """
    label(logger.info, f"Cleaning up ACR {registry}/{repository} repository")
    result = run([
        "az", "acr", "repository", "show-tags",
        "--name", registry,
        "--repository", repository,
    ])
    tags = json.loads(result.stdout)

    # <branch>-<hash>-<YYYYMMDD>-<HHMMSS>
    tag_match = re.compile(r"^([^-]+)-([A-Za-z0-9]{7})-([0-9]+)-([0-9]+)$")

    def _sort_tag(key):
        """
        From <branch>-<hash>-<datetime> to <datetime>-<branch>-<hash>

        :param str key:
        :return str:
        """
        # NOTE(review): tags that don't match tag_match (e.g. "latest") pass
        # through unchanged and sort among the rest — confirm that's intended.
        return re.sub(tag_match, "\\3-\\4-\\1-\\2", key)

    # BUG FIX: sort newest-first so the slice beyond MAX_TAGS selects the
    # OLD tags for deletion. The previous ascending sort kept the oldest
    # MAX_TAGS tags and deleted the newest ones instead.
    for tag in sorted(tags, key=_sort_tag, reverse=True)[MAX_TAGS:]:
        print(f"Deleting old tag {tag}")
        run([
            "az", "acr", "repository", "delete",
            "--yes",
            "--name", registry,
            "--image", f"{repository}:{tag}",
        ])
def cleanup_acr_registry(ctx, registry):
    """
    Clean up a whole Azure Container Registry

    :param Context ctx:
    :param str registry: Name of the ACR, i.e. <name>.azurecr.io
    """
    big_label(logger.info, f"Cleaning up ACR registry {registry}")

    result = run(["az", "acr", "repository", "list", "--name", registry])
    for repository in json.loads(result.stdout):
        cleanup_acr_repository(ctx, registry, repository)
def build(self, ctx: Context, dry_run=False, docker_args=None):
    """
    Build the Docker image for this component, if it has a Dockerfile.

    :param Context ctx:
    :param bool dry_run: If True, only log what would be done.
    :param list docker_args: Optional KEY=VALUE strings, each passed to
                             docker build as a separate --build-arg.
    """
    label(logger.info, f"Building {self.path}")

    dockerfile = self.path / "Dockerfile"
    if not dockerfile.exists():
        logger.info(f"No Dockerfile for {self.name} component")
        return

    build_args = []
    if docker_args:
        # Insert --build-arg before each item from docker_args.
        for docker_arg in docker_args:
            build_args += ["--build-arg", docker_arg]

    if dry_run:
        logger.info(f"[DRY RUN] Building {self.name} Docker image")
        return

    logger.info(f"Building {self.name} Docker image")
    tag = self._get_full_docker_name()
    run(["docker", "build", *build_args, self.path, "-t", tag], stream=True)
def release_env(ctx: Context, env, dry_run=False):
    """
    Apply the secrets of an environment and remove obsolete ones.

    :param Context ctx:
    :param str env: Name of the environment (directory under envs/).
    :param bool dry_run: If True, only log what would be done.
    """
    env_path = Path("envs") / env

    for secret in sorted((env_path / "secrets").glob("*.yaml")):
        # Sealed Secrets can't be validated like this
        # ctx.run(f"kubeval {secret}")
        if dry_run:
            logger.info(f"[DRY RUN] Applying {secret}")
            continue
        logger.info(f"Applying {secret}")
        run(["kubectl", "apply", "-f", secret])

    old_secrets = (env_path / "secrets" / "obsolete").glob("*.yaml")
    for secret in sorted(old_secrets, reverse=True):
        if dry_run:
            logger.info(f"[DRY RUN] Deleting {secret}")
            continue
        logger.info(f"Deleting {secret}")
        run(["kubectl", "delete", "-f", secret])
def kube_seal(content: str, cert: Path) -> str:
    """
    Encrypt given content using kubeseal.

    :param str content: The content of a Kubernetes "Secrets" yaml file.
    :param Path cert: Certificate / public key file to use for encryption.
    :return str: The content of the "SealedSecrets" yaml file.
    """
    result = run(
        ["kubeseal", "--cert", cert, "-o", "yaml"],
        input=content.encode(encoding="utf-8"),
    )
    # Normalize trailing whitespace to exactly one newline.
    sealed = result.stdout.decode(encoding="utf-8")
    return sealed.rstrip() + "\n"
def validate(self, ctx=None):
    """
    Sanity-check this component's kube configs.

    Always verifies that configs exist; additionally runs kubeval on each
    config when a context is given.

    :param Context ctx: Optional; skip kubeval checks when not given.
    :raises ValueError: If the component has no kube configs at all.
    :raises ValidationError: If kubeval rejects any config.
    """
    if not self.kube_configs:
        raise ValueError(f"No kube configs found in {self.path / 'kube'}")
    if not ctx:
        return

    skip_kinds = ",".join(KUBEVAL_SKIP_KINDS)
    for path in self.kube_configs.values():
        result = run(["kubeval", "--skip-kinds", skip_kinds, path])
        # NOTE(review): `> 0` misses negative returncodes (killed by
        # signal) — confirm whether run() already raises on failure.
        if result.returncode > 0:
            raise ValidationError(f"Validation failed for {path}")
def kubeval(ctx):
    """
    Check that all Kubernetes configs look valid with kubeval

    :param Context ctx:
    """
    label(logger.info, "Checking Kubernetes configs")

    def _should_ignore(path):
        # NOTE(review): this prefix match also ignores e.g. "templates/…"
        # — confirm whether only a "temp" directory was meant.
        return str(path).startswith("temp")

    kube_yamls = [
        str(path)
        for path in Path(".").glob("**/kube/*.yaml")
        if not _should_ignore(path)
    ]

    skip_kinds = ",".join(devops.settings.KUBEVAL_SKIP_KINDS)
    run(["kubeval", "--skip-kinds", skip_kinds] + kube_yamls)
def _update_from_templates_hook(ctx):
    """
    Update kube yaml merges from templates in a way that will work nicely
    with pre-commit hooks.

    :param Context ctx:
    :raises ValueError: If any rendered file is untracked in git.
    """
    rendered_files = devops.tasks.update_from_templates()

    result = run(["git", "status", "--untracked-files=all", "-s"])
    # NOTE(review): whitespace-splitting breaks on paths containing spaces;
    # `git status --porcelain -z` would be robust — confirm before changing.
    tokens = result.stdout.decode(encoding="utf-8").split()
    statuses = tokens[0::2]
    files = tokens[1::2]
    # Mapping from file path to git short status
    status_by_file = {f: status for status, f in zip(statuses, files)}

    for f in rendered_files:
        if status_by_file.get(str(f)) == "??":
            raise ValueError(
                f"Rendered file {f} is untracked, use 'git add' to add it!")
def get_master_key(env: str, use_existing=True) -> Path:
    """
    Get the master key for SealedSecrets for the given env.

    :param str env: The environment
    :param bool use_existing: If set to True, tries to use existing key from
                              filesystem instead of fetching a new one from
                              the cluster.
    :return Path: The path to the master key
    """
    settings = load_env_settings(env)
    master_key_file = master_key_path(env=env)

    # Reuse a previously fetched key when allowed.
    if use_existing and master_key_file.exists():
        return master_key_file

    ensure_context(settings.KUBE_CONTEXT)
    label(logger.info, f"Getting master key for {env}")

    # Based on:
    # https://github.com/bitnami-labs/sealed-secrets#how-can-i-do-a-backup-of-my-sealedsecrets
    result = run([
        "kubectl", "get", "secret",
        "-n", "kube-system",
        "-l", "sealedsecrets.bitnami.com/sealed-secrets-key",
        "-o", "yaml",
    ])
    content = result.stdout.decode(encoding="utf-8")

    logger.info(f"Saving master key to {master_key_file}")
    master_key_file.write_text(content, encoding="utf-8")
    return master_key_file
def ensure_namespace(namespace):
    """
    Ensure Kubernetes cluster has the given namespace

    :param str namespace:
    """
    # check=False: creation fails harmlessly when the namespace exists.
    run(["kubectl", "create", "namespace", namespace], check=False)
def ensure_context(context):
    """
    Ensure Kubernetes CLI is using the given context

    :param str context:
    """
    run(["kubectl", "config", "use-context", context])
def test_run():
    """run() should capture stdout of a simple subprocess correctly."""
    result = run(["python", "--version"])
    expected_version = sys.version.split(" ")[0]
    assert result.stdout.decode("utf-8").strip() == f"Python {expected_version}"
def _apply(config, **kwargs):
    """Apply a single Kubernetes config file, forwarding kwargs to run()."""
    run(["kubectl", "apply", "-f", config], **kwargs)
def init_kubernetes(ctx, env):
    """
    Initialize Kubernetes cluster

    :param Context ctx:
    :param str env:
    :return:
    """
    label(logger.info, f"Initializing Kubernetes for {env}")
    settings = load_env_settings(env)

    devops.tasks.ensure_context(settings.KUBE_CONTEXT)
    devops.tasks.ensure_namespace(settings.KUBE_NAMESPACE)

    def _get_kube_files(kube_context):
        # Base configs, with per-context overrides replacing same-named files.
        kube_files = {f.name: f for f in Path("kube").glob("*.yaml")}
        overrides = (Path("kube") / kube_context / "overrides").glob("*.yaml")
        for f in overrides:
            kube_files[f.name] = f
        # Convert to sorted list
        return [kube_files[name] for name in sorted(kube_files)]

    def _apply(config, **kwargs):
        run(["kubectl", "apply", "-f", config], **kwargs)

    secrets = Path("envs") / env / "secrets.pem"

    if env == LOCAL_ENV:
        # Make sure local Sealed Secrets master key is applied first
        master_key = Path("envs") / env / "secrets.key"
        if master_key.exists():
            logger.info(
                f"Applying Sealed Secrets master key from {master_key}")
            _apply(master_key, check=False)

    for config in _get_kube_files(settings.KUBE_CONTEXT):
        _apply(config)

    # Wait for Sealed Secrets -controller to start up
    run([
        "kubectl", "rollout", "status",
        "--namespace", "kube-system",
        "deploy/sealed-secrets-controller",
    ])

    # And try to dump the signing cert
    logger.info("Trying to fetch Sealed Secrets signing cert")
    attempts = 5
    while True:
        try:
            res = run(["kubeseal", "--fetch-cert"])
        except CalledProcessError:
            attempts -= 1
            if attempts <= 0:
                raise Exception("Failed to fetch Sealed Secrets cert")
            sleep(2)
            continue
        with secrets.open("w") as dst:
            dst.write(res.stdout.decode("utf-8"))
        break

    if env == LOCAL_ENV:
        # Store master key if needed
        master_key = Path("envs") / env / "secrets.key"
        if not master_key.exists():
            logger.info("Trying to store Sealed Secrets master key")
            res = run([
                "kubectl", "get", "secret",
                "--namespace", "kube-system",
                "-o", "custom-columns=name:metadata.name",
            ])
            # Collect the names of the sealed-secrets key secrets.
            key_secrets = [
                line
                for line in res.stdout.decode("utf-8").splitlines()
                if line.startswith("sealed-secrets-key")
            ]
            with master_key.open("w") as dst:
                first = True
                for secret in key_secrets:
                    if not first:
                        # Separate multiple yaml documents.
                        dst.write("---\n")
                    first = False
                    res = run([
                        "kubectl", "get", "secret",
                        "--namespace", "kube-system",
                        secret,
                        "-o", "yaml",
                    ])
                    print(res.stdout)
                    dst.write(res.stdout.decode("utf-8") + "\n")