Example #1
def get_master_key(env: str) -> None:
    """
    Get the master key for SealedSecrets for the given env.

    :param str env: The environment
    """
    settings = load_env_settings(env)
    ensure_context(settings.KUBE_CONTEXT)

    label(logger.info, f"Getting master key for {env}")

    # Based on:
    # https://github.com/bitnami-labs/sealed-secrets#how-can-i-do-a-backup-of-my-sealedsecrets
    result = run([
        "kubectl",
        "get",
        "secret",
        "-n",
        "kube-system",
        "-l",
        "sealedsecrets.bitnami.com/sealed-secrets-key",
        "-o",
        "yaml",
    ])

    content = result.stdout.decode(encoding="utf-8")
    output_file = master_key_path(env=env)

    logger.info(f"Saving master key to {output_file}")

    output_file.write_text(content, encoding="utf-8")
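A minimal usage sketch for the task above. The devops.tasks module name appears in Example #30, but treating get_master_key and master_key_path as importable from it is an assumption:

# Hypothetical usage; the import path is an assumption.
from devops.tasks import get_master_key, master_key_path

get_master_key("staging")  # runs kubectl against the configured staging context
key_file = master_key_path(env="staging")
print(key_file.read_text(encoding="utf-8")[:80])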
Example #2
def kubeval(keep_configs=False):
    """
    Check that all Kubernetes configs look valid with kubeval
    """

    label(logger.info, "Checking Kubernetes configs")

    def _should_ignore(path):
        return TMP in path.parents

    merge_tmp = TMP / f"kubeval-{generate_random_id()}"

    kube_yamls = [
        str(get_merged_kube_file(path, merge_tmp))
        for path in Path(".").glob("**/kube/*.yaml")
        if not _should_ignore(path)
    ]

    skip_kinds = ",".join(KUBEVAL_SKIP_KINDS)

    run(["kubeval", "--strict", "--skip-kinds", skip_kinds] + kube_yamls)

    if not keep_configs and merge_tmp.exists():
        logger.info(f"Removing temporary kube merges from {merge_tmp}")
        rmtree(merge_tmp)
    if keep_configs and merge_tmp.exists():
        logger.info(f"Keeping temporary kube merges in {merge_tmp}")
Example #3
    def _patch_generic(self, doc: dict):
        logger.info("Applying generic patches")
        meta = doc["metadata"]

        if self.namespace:
            logger.info(f"Updating namespace to {self.namespace}")
            meta["namespace"] = self.namespace
Example #4
    def _release_kube_config(self, ctx: Context, config: Path, dry_run: bool):
        if dry_run:
            logger.info(f"[DRY RUN] Applying {config}")
            return

        logger.info(f"Applying {config}")
        run(["kubectl", "apply", "-f", config])
Example #5
    def _delete_kube_config(self, ctx: Context, config: Path, dry_run: bool):
        if dry_run:
            logger.info(f"[DRY RUN] Deleting {config}")
            return

        logger.info(f"Deleting {config}")
        run(["kubectl", "delete", "-f", config])
Example #6
def unseal_secrets(env: str) -> None:
    """
    Decrypts the secrets for the desired env and base64 decodes them to make
    them easy to edit.

    :param str env: The environment.
    """
    # Validate env
    load_env_settings(env)

    master_key = get_master_key(env=env)
    secrets_pem = secrets_pem_path(env=env)

    sealed_secret_files = [
        secret_file
        for secret_file in (Path("envs") / env / "secrets").glob("*.yaml")
        if not secret_file.name.endswith(UNSEALED_SECRETS_EXTENSION)
    ]

    label(logger.info, f"Unsealing secrets for {env}")

    for input_file in sealed_secret_files:
        output_file = input_file.with_name(input_file.stem +
                                           UNSEALED_SECRETS_EXTENSION)

        logger.info(f"Unsealing {input_file} to {output_file}")

        content = input_file.read_text(encoding="utf-8")

        content = kube_unseal(content, master_key, cert=secrets_pem)
        content = base64_decode_secrets(content)

        output_file.write_text(content, encoding="utf-8")
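The base64_decode_secrets helper is not shown in this listing; as a sketch, its decoding step presumably amounts to the following on a plain Secret document (an assumption about its behavior):

import base64
import yaml

doc = yaml.safe_load(
    "apiVersion: v1\n"
    "kind: Secret\n"
    "metadata:\n"
    "  name: example\n"
    "data:\n"
    "  API_KEY: aHVudGVyMg==\n"
)
# Decode every data value so the file becomes human-editable
decoded = {k: base64.b64decode(v).decode("utf-8") for k, v in doc["data"].items()}
print(decoded)  # {'API_KEY': 'hunter2'}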
Example #7
def seal_secrets(env: str) -> None:
    """
    Base64 encodes and seals the secrets for the desired env.

    :param str env: The environment.
    """
    # Validate env
    load_env_settings(env)

    secrets_pem = secrets_pem_path(env=env)

    unsealed_secret_files = (Path("envs") / env /
                             "secrets").glob(f"*{UNSEALED_SECRETS_EXTENSION}")

    label(logger.info, f"Sealing secrets for {env}")

    for input_file in unsealed_secret_files:
        output_file_name = input_file.name[:-len(UNSEALED_SECRETS_EXTENSION
                                                 )] + ".yaml"
        output_file = input_file.with_name(output_file_name)

        logger.info(f"Sealing {input_file} as {output_file}")

        content = input_file.read_text(encoding="utf-8")

        content = base64_encode_secrets(content)
        content = kube_seal(content, cert=secrets_pem)

        output_file.write_text(content, encoding="utf-8")
Example #8
    def render_template_kind(self, kind, env, settings):
        plural_kind = f"{kind}s"
        if kind not in TEMPLATE_KINDS:
            raise Exception(f"Unsupported kind of template: {kind}")

        output_path = Path("envs") / env / plural_kind / self.path.as_posix() / "kube"

        # Remove all old rendered files of this kind, but leave any manually created ones
        logger.info(f"Cleaning up old {kind} files for {self.name} for env {env}")
        old_files = output_path.glob("*.yaml")
        for old_file in old_files:
            template_path = old_file.relative_to(Path("envs") / env / plural_kind)
            template_path = (
                template_path.parent / f"{kind}-templates" / template_path.name
            )

            with old_file.open(mode="r", encoding="utf-8") as f:
                content = f.read()
            if content.startswith(TEMPLATE_HEADER.format(file=template_path)):
                old_file.unlink()
                logger.debug(f"Deleted rendered file {old_file}")
            else:
                logger.debug(
                    f"Keeping {kind} file {old_file}, it does not appear to have been rendered from a template"
                )

        jinja_context = getattr(settings, "TEMPLATE_VARIABLES", {})
        rendered_files = []

        if not self.kube_templates[kind]:
            return rendered_files

        logger.info(f"Creating {kind} files for {self.name} for env {env}")

        if not output_path.is_dir():
            output_path.mkdir(mode=0o700, parents=True)

        for name, template_path in self.kube_templates[kind].items():
            with template_path.open(mode="r", encoding="utf-8") as f:
                content = f.read()

            template = jinja2.Template(content, undefined=jinja2.StrictUndefined)
            try:
                content = TEMPLATE_HEADER.format(file=template_path)
                content += template.render(jinja_context)
                content += "\n"
            except jinja2.exceptions.UndefinedError as ex:
                raise Exception(
                    f"Failed to render template {template_path} for env {env}, "
                    f"reason: {ex.message}"
                )

            output_file = output_path / name
            with output_file.open(mode="w", encoding="utf-8") as f:
                f.write(content)
                rendered_files.append(output_file)
            logger.debug(f"Rendered {kind} file {output_file}")

        return rendered_files
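jinja2.StrictUndefined is what turns a missing variable into the UndefinedError caught above; a minimal demonstration:

import jinja2

template = jinja2.Template("replicas: {{ replicas }}",
                           undefined=jinja2.StrictUndefined)
print(template.render({"replicas": 3}))  # replicas: 3
try:
    template.render({})  # 'replicas' is undefined -> raises
except jinja2.exceptions.UndefinedError as ex:
    print(f"Failed to render, reason: {ex.message}")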
Example #9
    def patch_from_env(self, env):
        env_path = Path("envs") / env / "overrides" / self.path.as_posix()
        for match in (env_path / "kube").glob("*.yaml"):
            logger.info(f"Found kube override {match.name} for {self.name} in {env}")
            self.kube_configs[match.name] = match

        merge_path = Path("envs") / env / "merges" / self.path.as_posix()
        for match in (merge_path / "kube").glob("*.yaml"):
            logger.info(f"Found kube merges {match.name} for {self.name} in {env}")
            self.kube_merges[match.name] = match
Example #10
def load_env_settings(env: str) -> Settings:
    module = f"envs.{env}.settings"
    logger.info(f"Loading settings from {module}")
    settings = importlib.import_module(module)

    # Set some defaults for optional values
    settings.IMAGE_PULL_SECRETS = getattr(settings, "IMAGE_PULL_SECRETS", {})
    settings.REPLICAS = getattr(settings, "REPLICAS", {})

    return settings
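The getattr-with-default pattern works on any module object; a self-contained sketch using a stand-in module (attribute values are illustrative):

import types

settings = types.ModuleType("envs.demo.settings")  # stand-in for a real settings module
settings.COMPONENTS = ["service/api"]

settings.IMAGE_PULL_SECRETS = getattr(settings, "IMAGE_PULL_SECRETS", {})
settings.REPLICAS = getattr(settings, "REPLICAS", {})
print(settings.COMPONENTS, settings.IMAGE_PULL_SECRETS, settings.REPLICAS)
# ['service/api'] {} {}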
Example #11
    def _get_kube_configs(self, path=None):
        if path is None:
            path = self.path

        config = {}
        for match in (path / "kube").glob("*.yaml"):
            logger.info(f"Found kube config {match.name} for {self.name}")
            config[match.name] = match

        return config
Example #12
    def _get_kube_templates(self, path=None):
        if path is None:
            path = self.path

        templates = defaultdict(dict)
        for kind in TEMPLATE_KINDS:
            for match in (path / "kube" / f"{kind}-templates").glob("*.yaml"):
                logger.info(f"Found {kind}-template {match.name} for {self.name}")
                templates[kind][match.name] = match

        return templates
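Using defaultdict(dict) keeps lookups for kinds with no templates safe; a quick illustration (the path is illustrative):

from collections import defaultdict

templates = defaultdict(dict)
templates["deployment"]["api.yaml"] = "api/kube/deployment-templates/api.yaml"
print(templates["cronjob"])  # {} -- no KeyError, and falsy, which render_template_kind checks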
Example #13
def validate_release_configs(ctx):
    envs = list_envs()
    for env in envs:
        logger.info("Validating configs for {} environment".format(env))
        settings = load_env_settings(env)
        components = settings.COMPONENTS

        for path in components:
            component = Component(path)
            component.validate(ctx)
            component.patch_from_env(env)
            component.validate(ctx)
Example #14
def release_env(ctx: Context, env, dry_run=False):
    env_path = Path("envs") / env

    secrets = sorted([
        secret_file for secret_file in (env_path / "secrets").glob("*.yaml")
        if not secret_file.name.endswith(UNSEALED_SECRETS_EXTENSION)
    ])

    for secret in secrets:
        # Sealed Secrets can't be validated like this
        # ctx.run(f"kubeval {secret}")
        if dry_run:
            logger.info(f"[DRY RUN] Applying {secret}")
            continue

        logger.info(f"Applying {secret}")
        run(["kubectl", "apply", "-f", secret])

    old_secrets = (env_path / "secrets" / "obsolete").glob("*.yaml")
    for secret in sorted(old_secrets, reverse=True):
        if dry_run:
            logger.info(f"[DRY RUN] Deleting {secret}")
            continue

        logger.info(f"Deleting {secret}")
        run(["kubectl", "delete", "-f", secret])
Example #15
    def _get_obsolete_kube_configs(self, path=None):
        if path is None:
            path = self.path

        obs_path = path / "kube" / "obsolete"

        configs = {}
        if not obs_path.exists():
            return configs

        for match in obs_path.glob("*.yaml"):
            logger.info(f"Found obsoleted kube config {match.name} for {self.name}")
            configs[match.name] = match

        return configs
Example #16
    def _restart_resource(
        self, ctx: Context, resource: str, dry_run: bool, no_rollout_wait: bool
    ):
        if dry_run:
            logger.info(f"[DRY RUN] Restarting resource {resource}")
            return

        logger.info(f"Restarting resource {resource}")
        run(["kubectl", "-n", self.namespace, "rollout", "restart", resource])

        if not no_rollout_wait:
            run(
                ["kubectl", "-n", self.namespace, "rollout", "status", resource],
                timeout=ROLLOUT_TIMEOUT,
            )
Example #17
def run(args,
        cwd=None,
        check=True,
        env=None,
        stream=False,
        timeout=None,
        input=None) -> subprocess.CompletedProcess:
    """
    Run a command

    :param List[str] args:
    :param str cwd:
    :param bool check:
    :param dict env:
    :param bool stream: If the output should be streamed instead of captured
    :param float timeout: Seconds to wait before failing
    :param bytes input: Data to be sent to the child process via stdin
    :raises subprocess.CalledProcessError:
    :raises subprocess.TimeoutExpired:
    :return subprocess.CompletedProcess:
    """
    # Convert Paths to strings
    for index, value in enumerate(args):
        args[index] = str(value)
    logger.info("  " + " ".join(args))

    kwargs = {"cwd": cwd, "check": check, "env": env, "input": input}

    if not stream:
        kwargs["stdout"] = subprocess.PIPE
        kwargs["stderr"] = subprocess.PIPE

    if timeout:
        kwargs["timeout"] = timeout

    start = time()
    try:
        res = subprocess.run(args, **kwargs)  # nosec
    except subprocess.CalledProcessError as e:
        logger.error("Failed to run " + " ".join(args))
        log_subprocess_output(e, logger.error)
        logger.error(f"  ✘ ... failed in {time() - start:.3f}s")
        raise
    else:
        log_subprocess_output(res, logger.debug)
        logger.info(f"  ✔ ... done in {time() - start:.3f}s")
        return res
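A usage sketch for run, assuming its helpers (logger, log_subprocess_output) are in scope; the commands are illustrative:

res = run(["echo", "hello"])       # output captured because stream defaults to False
print(res.stdout.decode("utf-8"))  # "hello\n"

res = run(["false"], check=False)  # check=False: a non-zero exit does not raise
print(res.returncode)              # 1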
Example #18
    def _try_post_release(
        self, ctx: Context, resource: str, selector: str, dry_run: bool
    ):
        if dry_run:
            logger.info(f"[DRY RUN] Running post-release.sh for {resource}")
            return

        result = run(
            [
                "kubectl",
                "-n",
                self.namespace,
                "get",
                "pods",
                "-l",
                selector,
                "-o",
                "json",
            ]
        )

        pods = []
        image = self._get_full_docker_name()
        for pod in json.loads(result.stdout)["items"]:
            for container in pod["spec"]["containers"]:
                if container["image"] == image:
                    pods.append(pod["metadata"]["name"])

        if not pods:
            raise Exception(f"No running pods with correct image found for {resource}")

        pod = random.choice(pods)  # nosec
        run(
            [
                "kubectl",
                "-n",
                self.namespace,
                "exec",
                "-it",
                pod,
                "sh",
                "post-release.sh",
            ],
            check=False,
        )
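The pod-filtering step, replayed on a hand-written kubectl-style JSON payload (sample data is illustrative):

import json

payload = json.dumps({"items": [
    {"metadata": {"name": "api-1"},
     "spec": {"containers": [{"image": "registry.example.com/app:1.0"}]}},
    {"metadata": {"name": "api-2"},
     "spec": {"containers": [{"image": "registry.example.com/other:2.0"}]}},
]})

image = "registry.example.com/app:1.0"  # assumed _get_full_docker_name() output
pods = [pod["metadata"]["name"]
        for pod in json.loads(payload)["items"]
        if any(c["image"] == image for c in pod["spec"]["containers"])]
print(pods)  # ['api-1']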
Example #19
    def _patch_image_pull_secrets(self, doc: dict):
        spec = doc["spec"]
        containers = spec["template"]["spec"]["containers"]
        image = ""
        if self.image:
            image = self.image
        else:
            for container in containers:
                image, _ = container["image"].split(":")
                break

        if "/" in image:
            host, _ = image.split("/", maxsplit=1)
            if host in self.image_pull_secrets:
                secret = self.image_pull_secrets[host]
                logger.info(f"Patching imagePullSecrets to {secret}")
                tpl_spec = spec["template"]["spec"]
                tpl_spec["imagePullSecrets"] = [{"name": secret}]
Example #20
def seal_secrets(env: str, only_changed=False) -> None:
    """
    Base64 encodes and seals the secrets for the desired env.

    :param str env: The environment.
    :param bool only_changed: Reseal only changed secrets.
    """
    # Validate env
    load_env_settings(env)

    secrets_pem = secrets_pem_path(env=env)

    unsealed_secret_files = (Path("envs") / env /
                             "secrets").glob(f"*{UNSEALED_SECRETS_EXTENSION}")

    label(logger.info, f"Sealing secrets for {env}")

    for input_file in unsealed_secret_files:
        output_file_name = input_file.name[:-len(UNSEALED_SECRETS_EXTENSION
                                                 )] + ".yaml"
        output_file = input_file.with_name(output_file_name)

        logger.info(f"Sealing {input_file} as {output_file}")

        content = input_file.read_text(encoding="utf-8")
        content = base64_encode_secrets(content)
        sealed_content = kube_seal(content, cert=secrets_pem)

        if only_changed and output_file.exists():
            master_key = get_master_key(env=env)
            sealed_original_content = output_file.read_text(encoding="utf-8")
            original_content = kube_unseal(sealed_original_content,
                                           master_key,
                                           cert=secrets_pem)
            sealed_content = _revert_unchanged_secrets(
                content, sealed_content, original_content,
                sealed_original_content)
        else:
            # Load and dump yaml to ensure consistent formatting with above
            sealed_content = yaml.safe_dump(yaml.safe_load(sealed_content))

        output_file.write_text(sealed_content, encoding="utf-8")
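The load-and-dump normalization from the else branch, shown standalone:

import yaml

raw = 'API_KEY:   "abc"\nfoo:    1\n'
print(yaml.safe_dump(yaml.safe_load(raw)), end="")
# API_KEY: abc
# foo: 1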
Example #21
    def _patch_yaml_docs(self, config: List[dict]):
        processed = []
        for doc in config:
            kind = doc["kind"]

            if kind in SKIP_PATCH_KUBE_KINDS:
                logger.info(f"Skipping {kind} patching")
                continue

            self._patch_generic(doc)
            if kind == "Deployment":
                self._patch_deployment(doc)
            elif kind == "DaemonSet":
                self._patch_daemon_set(doc)
            elif kind == "StatefulSet":
                self._patch_stateful_set(doc)

            processed.append(doc)

        return processed
Example #22
def get_master_key(env: str, use_existing=True) -> Path:
    """
    Get the master key for SealedSecrets for the given env.

    :param str env: The environment
    :param bool use_existing: If set to True, tries to use existing key from filesystem
    instead of fetching a new one from the cluster.
    :return Path: The path to the master key
    """
    settings = load_env_settings(env)
    master_key_file = master_key_path(env=env)
    if use_existing and master_key_file.exists():
        return master_key_file

    ensure_context(settings.KUBE_CONTEXT)

    label(logger.info, f"Getting master key for {env}")

    # Based on:
    # https://github.com/bitnami-labs/sealed-secrets#how-can-i-do-a-backup-of-my-sealedsecrets
    result = run([
        "kubectl",
        "get",
        "secret",
        "-n",
        "kube-system",
        "-l",
        "sealedsecrets.bitnami.com/sealed-secrets-key",
        "-o",
        "yaml",
    ])

    content = result.stdout.decode(encoding="utf-8")

    logger.info(f"Saving master key to {master_key_file}")

    master_key_file.write_text(content, encoding="utf-8")
    return master_key_file
Example #23
    def _patch_containers(self, doc: dict):
        logger.info("Patching containers")
        containers = doc["spec"]["template"]["spec"]["containers"]
        for container in containers:
            image, tag = container["image"].split(":")
            if self.image:
                logger.info(f"Patching image from {image} to {self.image}")
                image = self.image
            if self.tag:
                logger.info(f"Patching tag from {tag} to {self.tag}")
                tag = self.tag
            container["image"] = f"{image}:{tag}"
Example #24
    def build(self, ctx: Context, dry_run=False):
        label(logger.info, f"Building {self.path}")
        dockerfile = self.path / "Dockerfile"

        if not dockerfile.exists():
            logger.info(f"No Dockerfile for {self.name} component")
            return

        if dry_run:
            logger.info(f"[DRY RUN] Building {self.name} Docker image")
        else:
            logger.info(f"Building {self.name} Docker image")
            tag = self._get_full_docker_name()
            run(["docker", "build", self.path, "-t", tag], stream=True)
Example #25
def release_env(ctx: Context, env, dry_run=False):
    env_path = Path("envs") / env

    secrets = (env_path / "secrets").glob("*.yaml")
    for secret in sorted(secrets):
        # Sealed Secrets can't be validated like this
        # ctx.run(f"kubeval {secret}")
        if dry_run:
            logger.info(f"[DRY RUN] Applying {secret}")
            continue

        logger.info(f"Applying {secret}")
        run(["kubectl", "apply", "-f", secret])

    old_secrets = (env_path / "secrets" / "obsolete").glob("*.yaml")
    for secret in sorted(old_secrets, reverse=True):
        if dry_run:
            logger.info(f"[DRY RUN] Deleting {secret}")
            continue

        logger.info(f"Deleting {secret}")
        run(["kubectl", "delete", "-f", secret])
Example #26
    def _patch_yaml_docs(self, config: List[dict]):
        processed = []
        for doc in config:
            kind = doc["kind"]

            if kind in SKIP_PATCH_KUBE_KINDS:
                logger.info(f"Skipping {kind} patching")
                continue

            self._patch_generic(doc)
            if kind in ("Deployment", "DaemonSet", "StatefulSet"):
                logger.info(f"Patching found {kind}")
                self._patch_extra(doc)
            elif kind == "CronJob":
                logger.info(f"Patching found {kind}")
                self._patch_cronjob(doc)

            processed.append(doc)

        return processed
Example #27
    def build(self, ctx: Context, dry_run=False, docker_args=None):
        label(logger.info, f"Building {self.path}")
        dockerfile = self.path / "Dockerfile"

        if not dockerfile.exists():
            logger.info(f"No Dockerfile for {self.name} component")
            return

        build_args = []
        if docker_args:
            # Insert --build-arg before each item from docker_args.
            for docker_arg in docker_args:
                build_args.extend(["--build-arg", docker_arg])

        if dry_run:
            logger.info(f"[DRY RUN] Building {self.name} Docker image")
        else:
            logger.info(f"Building {self.name} Docker image")
            tag = self._get_full_docker_name()
            run(["docker", "build", *build_args, self.path, "-t", tag],
                stream=True)
Example #28
    def _prepare_configs(self, dst: Path):
        dst = dst / self.path
        kube_dst = dst / "kube"
        kube_dst.mkdir(mode=0o700, parents=True)
        logger.info(f"Writing configs to {dst}")

        dockerfile = self.path / "Dockerfile"
        if dockerfile.exists():
            logger.info("Copying Dockerfile")
            copy(dockerfile, dst / "Dockerfile")

        for config in self.kube_configs:
            config_file = self.path / "kube" / config
            src = self.kube_configs[config]  # Incl. env patch
            logger.info(f"Patching {config_file}")
            with src.open("r") as f:
                docs = list(yaml.load_all(f, Loader))

            self._patch_yaml_docs(docs)

            if config in self.kube_merges:
                # Use the Loader to get the values with the actual types.
                with self.kube_merges[config].open("r") as f:
                    overrides = list(yaml.load_all(f, Loader))
                # Use the BaseLoader to get literal values, such as tilde (~).
                with self.kube_merges[config].open("r") as f:
                    base_overrides = list(yaml.load_all(f, BaseLoader))
                docs = merge_docs(docs, overrides, base_overrides)

            dst_path = kube_dst / config
            with dst_path.open("w") as config_dst:
                yaml.dump_all(docs, stream=config_dst, Dumper=Dumper)

            self.kube_configs[config] = dst_path

        self.path = dst
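Multi-document handling and the Loader/BaseLoader split, demonstrated with PyYAML's stock loaders (the module's own Loader/Dumper classes are defined elsewhere):

import yaml

stream = "kind: Service\n---\nkind: Deployment\n"
docs = list(yaml.safe_load_all(stream))
print([d["kind"] for d in docs])  # ['Service', 'Deployment']

# BaseLoader keeps scalars literal, which is why it is used for merge overrides:
print(yaml.load("a: ~", Loader=yaml.BaseLoader))  # {'a': '~'}  (literal tilde)
print(yaml.safe_load("a: ~"))                     # {'a': None}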
Example #29
def release(
    ctx,
    env,
    component=None,
    image=None,
    tag=None,
    replicas=None,
    dry_run=False,
    keep_configs=False,
    no_rollout_wait=False,
):
    tags: dict = {}
    images: dict = {}
    replica_counts: dict = {}
    components: List[str] = []

    if image:
        for i in image:
            path, value = i.split("=")
            images[path] = value

    if tag:
        for t in tag:
            path, value = t.split("=")
            tags[path] = value

    if replicas:
        for r in replicas:
            path, value = r.split("=")
            replica_counts[path] = value

    rel_id = generate_release_id()
    big_label(logger.info, f"Release {rel_id} to {env} environment starting")
    settings = load_env_settings(env)

    if component:
        components = component
    else:
        components = settings.COMPONENTS

    # Override env settings for replicas
    if replica_counts:
        for path in replica_counts:
            settings.REPLICAS[path] = replica_counts[path]

    rel_path = RELEASE_TMP / rel_id

    logger.info("")
    logger.info("Releasing components:")
    for component in components:
        logger.info(f" - {component}")

    logger.info("")
    logger.info("Setting images and tags:")
    for path in components:
        tag = "(default)"
        image = "(default)"

        if path in tags:
            tag = tags[path]
        if path in images:
            image = images[path]

        logger.info(f" - {path} = {image}:{tag}")
    logger.info("")

    ensure_context(settings.KUBE_CONTEXT)
    ensure_namespace(settings.KUBE_NAMESPACE)
    release_env(ctx, env, dry_run)

    for path in components:
        logger.info("")
        label(logger.info, f"Releasing component {path}")

        component = Component(path)
        if path in images:
            component.image = images[path]
            images.pop(path)
        if path in tags:
            component.tag = tags[path]
            tags.pop(path)
        if path in settings.REPLICAS:
            component.replicas = settings.REPLICAS[path]
            replica_counts.pop(path, None)

        component.namespace = settings.KUBE_NAMESPACE
        component.context = settings.KUBE_CONTEXT
        component.image_pull_secrets = settings.IMAGE_PULL_SECRETS

        component.patch_from_env(env)
        component.validate(ctx)

        component.release(ctx, rel_path, dry_run, no_rollout_wait)

    if images:
        logger.error("Unprocessed image configurations:")
        for path in images:
            logger.error(f" - {path}={images[path]}")

    if tags:
        logger.error("Unprocessed tag configurations:")
        for path in tags:
            logger.error(f" - {path}={tags[path]}")

    if replica_counts:
        logger.error("Unprocessed replica configurations:")
        for path in replica_counts:
            logger.error(f" - {path}={replica_counts[path]}")

    if not keep_configs:
        logger.info(f"Removing temporary configurations from {rel_path}")
        if rel_path.exists():
            rmtree(rel_path)
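The repeated path=value splitting for the --image/--tag/--replicas style options, as a standalone sketch (values are illustrative):

overrides = ["service/api=v1.2.3", "service/worker=v9"]
parsed = dict(item.split("=") for item in overrides)
print(parsed)  # {'service/api': 'v1.2.3', 'service/worker': 'v9'}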
Example #30
def init_kubernetes(ctx, env):
    """
    Initialize Kubernetes cluster
    :param Context ctx:
    :param str env:
    :return:
    """
    label(logger.info, f"Initializing Kubernetes for {env}")

    settings = load_env_settings(env)
    devops.tasks.ensure_context(settings.KUBE_CONTEXT)
    devops.tasks.ensure_namespace(settings.KUBE_NAMESPACE)

    def _get_kube_files(kube_context):
        kube_files = {f.name: f for f in Path("kube").glob("*.yaml")}

        overrides = (Path("kube") / kube_context / "overrides").glob("*.yaml")
        for f in overrides:
            kube_files[f.name] = f

        # Convert to sorted list
        kube_files = [kube_files[name] for name in sorted(kube_files.keys())]
        return kube_files

    def _apply(config, **kwargs):
        run(["kubectl", "apply", "-f", config], **kwargs)

    secrets = Path("envs") / env / "secrets.pem"
    if env == LOCAL_ENV:
        # Make sure local Sealed Secrets master key is applied first
        master_key = Path("envs") / env / "secrets.key"
        if master_key.exists():
            logger.info(
                f"Applying Sealed Secrets master key from {master_key}")
            _apply(master_key, check=False)

    for c in _get_kube_files(settings.KUBE_CONTEXT):
        _apply(c)

    # Wait for the Sealed Secrets controller to start up
    run([
        "kubectl",
        "rollout",
        "status",
        "--namespace",
        "kube-system",
        "deploy/sealed-secrets-controller",
    ])

    # And try to dump the signing cert
    logger.info("Trying to fetch Sealed Secrets signing cert")
    attempts = 5
    while True:
        try:
            res = run(["kubeseal", "--fetch-cert"])
        except CalledProcessError:
            attempts -= 1
            if attempts <= 0:
                raise Exception("Failed to fetch Sealed Secrets cert")

            sleep(2)
            continue

        with secrets.open("w") as dst:
            dst.write(res.stdout.decode("utf-8"))

        break

    if env == LOCAL_ENV:
        # Store master key if needed
        master_key = Path("envs") / env / "secrets.key"
        if not master_key.exists():
            logger.info("Trying to store Sealed Secrets master key")
            res = run([
                "kubectl",
                "get",
                "secret",
                "--namespace",
                "kube-system",
                "-o",
                "custom-columns=name:metadata.name",
            ])
            secrets = []
            for line in res.stdout.decode("utf-8").splitlines():
                if line.startswith("sealed-secrets-key"):
                    secrets.append(line)

            with master_key.open("w") as dst:
                first = True
                for secret in secrets:
                    if not first:
                        dst.write("---\n")
                    first = False
                    res = run([
                        "kubectl",
                        "get",
                        "secret",
                        "--namespace",
                        "kube-system",
                        secret,
                        "-o",
                        "yaml",
                    ])
                    print(res.stdout)
                    dst.write(res.stdout.decode("utf-8") + "\n")
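The bounded retry around kubeseal --fetch-cert, extracted into a reusable sketch (the function name and defaults are assumptions):

from subprocess import CalledProcessError
from time import sleep

def retry(fn, attempts=5, delay=2):
    """Call fn until it succeeds or attempts run out, then re-raise."""
    while True:
        try:
            return fn()
        except CalledProcessError:
            attempts -= 1
            if attempts <= 0:
                raise
            sleep(delay)

# Hypothetical usage with the run() helper from Example #17:
# cert = retry(lambda: run(["kubeseal", "--fetch-cert"]))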