Example #1
def destroy_team(context: "Context", team_context: "TeamContext") -> None:
    _logger.debug("Stack name: %s", team_context.stack_name)
    if cfn.does_stack_exist(stack_name=context.toolkit.stack_name):
        if cfn.does_stack_exist(stack_name=team_context.stack_name):
            args: List[str] = [context.name, team_context.name]
            cdk.destroy(
                context=context,
                stack_name=team_context.stack_name,
                app_filename=os.path.join(ORBIT_CLI_ROOT, "remote_files",
                                          "cdk", "team.py"),
                args=args,
            )

        _logger.debug(
            "Running team-specific post_hook to destroy the CFN resources")
        _logger.debug(f"team_context.plugins={team_context.plugins}")
        for plugin in team_context.plugins:
            _logger.debug(f"post hook plugin={plugin}")
            if plugin.plugin_id == "custom_cfn":
                hook: plugins.HOOK_TYPE = plugins.PLUGINS_REGISTRIES.get_hook(
                    context=context,
                    team_name=team_context.name,
                    plugin_name=plugin.plugin_id,
                    hook_name="post_hook",
                )
                if hook is not None:
                    _logger.debug(
                        f"Found post hook for team {team_context.name} plugin {plugin.plugin_id}"
                    )
                    hook(plugin.plugin_id, context, team_context,
                         plugin.parameters)
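
A minimal driver sketch for destroy_team above; it assumes Context, ContextSerDe, and destroy_team are importable/in scope exactly as in the surrounding examples and is not taken from the source repository:

def destroy_all_teams(env_name: str) -> None:
    # Load the environment Context from SSM (the same loader the CLI commands use)
    # and call destroy_team for every registered team.
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    for team_context in context.teams:
        destroy_team(context=context, team_context=team_context)
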
def destroy_env(env: str, debug: bool) -> None:
    with MessagesContext("Destroying", debug=debug) as msg_ctx:
        ssm.cleanup_changeset(env_name=env)
        ssm.cleanup_manifest(env_name=env)

        if ssm.does_parameter_exist(name=f"/orbit/{env}/context") is False:
            msg_ctx.info(f"Environment {env} not found. Destroying only possible remaining resources.")
            elb.delete_load_balancers(env_name=env)
            destroy_remaining_resources(env_name=env, top_level="orbit")
            msg_ctx.progress(100)
            return

        context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
        msg_ctx.info("Context loaded")
        msg_ctx.info(f"Teams: {','.join([t.name for t in context.teams])}")
        msg_ctx.progress(2)

        if any(cfn.does_stack_exist(stack_name=t.stack_name) for t in context.teams):
            msg_ctx.error("Found Teams dependent on the Envrionment.")
            return

        if (
            cfn.does_stack_exist(stack_name=context.env_stack_name)
            or cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
            or cfn.does_stack_exist(stack_name=context.cdk_toolkit.stack_name)
        ):
            bundle_path = bundle.generate_bundle(command_name="destroy", context=context)
            msg_ctx.progress(5)

            buildspec = codebuild.generate_spec(
                context=context,
                plugins=True,
                cmds_build=[f"orbit remote --command destroy_env {env}"],
                changeset=None,
            )
            remote.run(
                command_name="destroy",
                context=context,
                bundle_path=bundle_path,
                buildspec=buildspec,
                codebuild_log_callback=msg_ctx.progress_bar_callback,
                timeout=45,
            )

        msg_ctx.info("Env destroyed")
        msg_ctx.progress(95)

        try:
            destroy_toolkit(env_name=context.name)
        except botocore.exceptions.ClientError as ex:
            error = ex.response["Error"]
            if "does not exist" not in error["Message"]:
                raise
            _logger.debug(f"Skipping toolkit destroy: {error['Message']}")
        msg_ctx.info("Toolkit destroyed")
        ssm.cleanup_env(env_name=context.name)

        msg_ctx.progress(100)
def destroy_foundation(env: str, debug: bool) -> None:
    with MessagesContext("Destroying", debug=debug) as msg_ctx:
        ssm.cleanup_changeset(env_name=env, top_level="orbit-foundation")
        ssm.cleanup_manifest(env_name=env, top_level="orbit-foundation")

        if ssm.does_parameter_exist(name=f"/orbit-foundation/{env}/context") is False:
            msg_ctx.info(f"Foundation {env} not found. Destroying only possible remaining resources.")
            destroy_remaining_resources(env_name=env, top_level="orbit-foundation")
            msg_ctx.progress(100)
            return

        context: "FoundationContext" = ContextSerDe.load_context_from_ssm(env_name=env, type=FoundationContext)
        msg_ctx.info("Context loaded")
        msg_ctx.progress(2)

        msg_ctx.progress(4)

        if (
            cfn.does_stack_exist(stack_name=cast(str, context.stack_name))
            or cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
            or cfn.does_stack_exist(stack_name=context.cdk_toolkit.stack_name)
        ):
            bundle_path = bundle.generate_bundle(command_name="destroy", context=cast(Context, context))
            msg_ctx.progress(5)

            buildspec = codebuild.generate_spec(
                context=cast(Context, context),
                plugins=False,
                cmds_build=[f"orbit remote --command destroy_foundation {env}"],
                changeset=None,
            )
            remote.run(
                command_name="destroy",
                context=cast(Context, context),
                bundle_path=bundle_path,
                buildspec=buildspec,
                codebuild_log_callback=msg_ctx.progress_bar_callback,
                timeout=45,
            )
        msg_ctx.info("Foundation destroyed")
        msg_ctx.progress(95)

        try:
            destroy_toolkit(env_name=context.name, top_level="orbit-foundation")
        except botocore.exceptions.ClientError as ex:
            error = ex.response["Error"]
            if "does not exist" not in error["Message"]:
                raise
            _logger.debug(f"Skipping toolkit destroy: {error['Message']}")
        msg_ctx.info("Toolkit destroyed")
        ssm.cleanup_env(env_name=context.name, top_level="orbit-foundation")

        msg_ctx.progress(100)
def destroy_env(env: str, preserve_credentials: bool, debug: bool) -> None:
    with MessagesContext("Destroying", debug=debug) as msg_ctx:
        ssm.cleanup_changeset(env_name=env)
        ssm.cleanup_manifest(env_name=env)

        if ssm.does_parameter_exist(name=f"/orbit/{env}/context") is False:
            msg_ctx.info(
                f"Environment {env} not found. Destroying only possible remaining resources."
            )
            elb.delete_load_balancers(env_name=env)
            destroy_remaining_resources(env_name=env, top_level="orbit")
            msg_ctx.progress(100)
            return

        msg_ctx.progress(15)
        context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env,
                                                                type=Context)
        msg_ctx.info("Context loaded")
        msg_ctx.info(f"Teams: {','.join([t.name for t in context.teams])}")

        if any(
                cfn.does_stack_exist(stack_name=t.stack_name)
                for t in context.teams):
            raise click.ClickException(
                "Found Teams dependent on the Envrionment.")

        if (cfn.does_stack_exist(stack_name=context.env_stack_name)
                or cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
                or cfn.does_stack_exist(
                    stack_name=context.cdk_toolkit.stack_name)):
            msg_ctx.progress(50)
            destroy.destroy_env(env_name=context.name)

        if not preserve_credentials:
            secretsmanager.delete_docker_credentials(
                secret_id=f"orbit-{context.name}-docker-credentials")
            _logger.info("Removed docker credentials from SecretsManager")

        try:
            if context.cdk_toolkit.s3_bucket:
                s3.delete_bucket(bucket=context.cdk_toolkit.s3_bucket)
        except Exception as ex:
            _logger.debug(
                "Skipping Environment CDK Toolkit bucket deletion. Cause: %s",
                ex)

        msg_ctx.info("Env destroyed leaving the Env toolkit")

        msg_ctx.progress(100)
def destroy(plugin_id: str, context: "Context", team_context: "TeamContext", parameters: Dict[str, Any]) -> None:
    _logger.debug("Destroying Custom CloudFormation  plugin resources for team %s", team_context.name)
    _logger.debug("Team Env name: %s | Team name: %s", context.name, team_context.name)
    env_name = context.name
    acct: str = context.account_id
    deploy_id: str = cast(str, context.toolkit.deploy_id)
    plugin_id = plugin_id.replace("_", "-")
    stack_name = f"orbit-{context.name}-{team_context.name}-{plugin_id}-custom-demo-resources"
    _logger.debug(f"stack_name={stack_name}")
    bucket_names: Dict[str, Any] = {
        "lake-bucket": f"orbit-{env_name}-demo-lake-{acct}-{deploy_id}",
        "secured-lake-bucket": f"orbit-{env_name}-secured-demo-lake-{acct}-{deploy_id}",
    }
    _logger.debug(f"bucket_names={bucket_names}")
    # CDK skips bucket deletion.
    if cfn.does_stack_exist(stack_name=stack_name):
        try:
            _logger.debug("Deleting lake-bucket")
            s3.delete_bucket(bucket=bucket_names["lake-bucket"])
        except Exception as ex:
            _logger.debug("Skipping Team Lake Bucket deletion. Cause: %s", ex)
        try:
            _logger.debug("Deleting secured-lake-bucket")
            s3.delete_bucket(bucket=bucket_names["secured-lake-bucket"])
        except Exception as ex:
            _logger.debug("Skipping Team Secured Lake Bucket deletion. Cause: %s", ex)

    _logger.debug("Destroying custom resources using post hook")
    cfn.destroy_stack(stack_name=stack_name)
    _logger.debug("Destroyed")
Example #6
def destroy_env(context: "Context") -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        sh.run(
            f"eksctl utils write-kubeconfig --cluster orbit-{context.name} --set-kubeconfig-context"
        )
        k8s_context = get_k8s_context(context=context)
        _logger.debug("kubectl k8s_context: %s", k8s_context)
        try:
            # Here we remove some finalizers that can cause our delete to hang indefinitely
            try:
                sh.run(
                    "kubectl patch crd/trainingjobs.sagemaker.aws.amazon.com "
                    '--patch \'{"metadata":{"finalizers":[]}}\' --type=merge'
                    f" --context {k8s_context}")
            except FailedShellCommand:
                _logger.debug("Ignoring patch failure")

            output_path = _generate_orbit_system_manifest(context=context)
            sh.run(f"kubectl delete -f {output_path} --grace-period=0 --force "
                   f"--ignore-not-found --wait --context {k8s_context}")
            output_paths = _generate_orbit_system_kustomizations(
                context=context, clean_up=True)
            for output_path in output_paths:
                sh.run(
                    f"kubectl delete -k {output_path} --grace-period=0 --force "
                    f"--ignore-not-found --wait --context {k8s_context}")

        except exceptions.FailedShellCommand as ex:
            _logger.debug("Skipping: %s", ex)
            pass  # Leave it for eksctl; it will destroy everything anyway...
def deploy_team(context: Context, team_context: TeamContext) -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        repo_location = init_team_repo(context=context,
                                       team_context=team_context)
        repo = team_context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)

        team_charts_path = create_team_charts_copy(team_context=team_context,
                                                   path=os.path.join(
                                                       CHARTS_PATH, "team"))

        chart_name, chart_version, chart_package = package_chart(
            repo=repo,
            chart_path=os.path.join(team_charts_path, "jupyter-hub"),
            values={
                "team": team_context.name,
                "efsid": context.shared_efs_fs_id,
                "region": context.region,
                "ssl_cert_arn": context.networking.frontend.ssl_cert_arn,
                "env_name": context.name,
                "jupyter_hub_repository": context.images.jupyter_hub.repository,
                "jupyter_hub_tag": context.images.jupyter_hub.version,
                "jupyter_user_repository": context.images.jupyter_user.repository,
                "jupyter_user_tag": context.images.jupyter_user.version,
                "grant_sudo": '"yes"' if team_context.grant_sudo else '"no"',
                "internal_load_balancer": '"false"' if context.networking.frontend.load_balancers_subnets else '"true"',
                "jupyterhub_inbound_ranges": str(
                    team_context.jupyterhub_inbound_ranges
                    if team_context.jupyterhub_inbound_ranges
                    else [utils.get_dns_ip_cidr(context=context)]
                ),
                "sts_ep": "legacy" if context.networking.data.internet_accessible else "regional",
                "image_pull_policy": "Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
            },
        )
        install_chart(
            repo=repo,
            namespace=team_context.name,
            name=f"{team_context.name}-jupyter-hub",
            chart_name=chart_name,
            chart_version=chart_version,
        )
Example #8
def destroy_kubeflow(context: Context) -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)

    if cfn.does_stack_exist(stack_name=final_eks_stack_name):
        kubectl.write_kubeconfig(context=context)

        for line in sh.run_iterating("kubectl get namespace kubeflow"):
            if '"kubeflow" not found' in line:
                return

        cluster_name = f"orbit-{context.name}"
        output_path = os.path.join(".orbit.out", context.name, "kubeflow",
                                   cluster_name)
        gen_kubeflow_config(context, output_path, cluster_name)

        _logger.debug("Destroying Kubeflow")
        output_path = os.path.abspath(output_path)
        _logger.debug(f"kubeflow config dir: {output_path}")
        utils.print_dir(output_path)

        timeouts = 0
        while timeouts < 3:
            try:
                _logger.info("Deleting kubeflow resources")
                sh.run("./delete_kf.sh", cwd=output_path)
            except FailedShellCommand:
                _logger.info(
                    "The command returned a non-zero exit code. Retrying to delete resources"
                )
                timeouts += 1
                time.sleep(300)
            else:
                # Stop retrying once the delete script succeeds.
                break
Example #9
def deploy_env(context: Context) -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        repo_location = init_env_repo(context=context)
        repo = context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)

        chart_name, chart_version, chart_package = package_chart(
            repo=repo,
            chart_path=os.path.join(CHARTS_PATH, "env", "landing-page"),
            values={
                "region": context.region,
                "env_name": context.name,
                "user_pool_id": context.user_pool_id,
                "user_pool_client_id": context.user_pool_client_id,
                "identity_pool_id": context.identity_pool_id,
                "ssl_cert_arn": context.networking.frontend.ssl_cert_arn,
                "repository": context.images.landing_page.repository,
                "tag": context.images.landing_page.version,
                "cognito_external_provider": context.cognito_external_provider,
                "cognito_external_provider_label": context.cognito_external_provider
                if context.cognito_external_provider_label is None
                else context.cognito_external_provider_label,
                "cognito_external_provider_domain": "null"
                if context.cognito_external_provider_domain is None
                else context.cognito_external_provider_domain,
                "cognito_external_provider_redirect": "null"
                if context.cognito_external_provider_redirect is None
                else context.cognito_external_provider_redirect,
                "internal_load_balancer": '"false"' if context.networking.frontend.load_balancers_subnets else '"true"',
                "image_pull_policy": "Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
            },
        )
        install_chart(
            repo=repo, namespace="env", name="landing-page", chart_name=chart_name, chart_version=chart_version
        )

        if context.install_image_replicator or not context.networking.data.internet_accessible:
            chart_name, chart_version, chart_package = package_chart(
                repo=repo,
                chart_path=os.path.join(CHARTS_PATH, "env", "image-replicator"),
                values={
                    "region": context.region,
                    "account_id": context.account_id,
                    "env_name": context.name,
                    "repository": context.images.image_replicator.repository,
                    "tag": context.images.image_replicator.version,
                    "sts_ep": "legacy" if context.networking.data.internet_accessible else "regional",
                    "image_pull_policy": "Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
                },
            )
            install_chart(
                repo=repo,
                namespace="kube-system",
                name="image-replicator",
                chart_name=chart_name,
                chart_version=chart_version,
            )
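
A hedged sketch of the two-step Helm flow repeated above (package_chart, then install_chart), assuming a loaded Context as in deploy_env; the chart directory, chart name, and values are hypothetical placeholders:

# Package a hypothetical chart into the env Helm repo, then install it into the "env" namespace.
chart_name, chart_version, chart_package = package_chart(
    repo=context.name,
    chart_path=os.path.join(CHARTS_PATH, "env", "my-chart"),  # hypothetical chart directory
    values={"env_name": context.name, "region": context.region},
)
install_chart(
    repo=context.name,
    namespace="env",
    name="my-chart",
    chart_name=chart_name,
    chart_version=chart_version,
)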
Example #10
def destroy_teams(context: "Context") -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    cluster_name = f"orbit-{context.name}"
    if cfn.does_stack_exist(stack_name=final_eks_stack_name) and context.teams:
        for team in context.teams:
            eks.delete_fargate_profile(
                profile_name=f"orbit-{context.name}-{team.name}",
                cluster_name=cluster_name,
            )

            username = f"orbit-{context.name}-{team.name}-runner"
            arn = f"arn:aws:iam::{context.account_id}:role/{username}"
            for line in sh.run_iterating(f"eksctl get iamidentitymapping --cluster {cluster_name} --arn {arn}"):
                if line == f'Error: no iamidentitymapping with arn "{arn}" found':
                    _logger.debug(f"Skipping non-existent IAM Identity Mapping - Role: {arn}")
                    break
            else:
                _logger.debug(f"Removing IAM Identity Mapping - Role: {arn}")
                sh.run(f"eksctl delete iamidentitymapping --cluster {cluster_name} --arn {arn}")

            username = f"orbit-{context.name}-{team.name}"
            arn = cast(str, team.eks_pod_role_arn)
            for line in sh.run_iterating(f"eksctl get iamidentitymapping --cluster {cluster_name} --arn {arn}"):
                if line == f'Error: no iamidentitymapping with arn "{arn}" found':
                    _logger.debug(f"Skipping non-existent IAM Identity Mapping - Role: {arn}")
                    break
            else:
                _logger.debug(f"Removing IAM Identity Mapping - Role: {arn}")
                sh.run(f"eksctl delete iamidentitymapping --cluster {cluster_name} --arn {arn}")

        _logger.debug("EKSCTL Teams destroyed")
def deploy_credentials(filename: str, username: str, password: str, registry: str, debug: bool) -> None:
    with MessagesContext("Deploying", debug=debug) as msg_ctx:
        msg_ctx.progress(2)

        manifest: "Manifest" = ManifestSerDe.load_manifest_from_file(filename=filename, type=Manifest)
        msg_ctx.info(f"Manifest loaded: {filename}")
        msg_ctx.progress(3)

        context_parameter_name: str = f"/orbit/{manifest.name}/context"
        if not ssm.does_parameter_exist(name=context_parameter_name):
            msg_ctx.error(f"Orbit Environment {manifest.name} cannot be found in the current account and region.")
            return

        context: "Context" = ContextSerDe.load_context_from_manifest(manifest=manifest)
        msg_ctx.info("Current Context loaded")
        msg_ctx.progress(4)

        msg_ctx.info("Encrypting credentials with Toolkit KMS Key")
        ciphertext = kms.encrypt(
            context=context, plaintext=json.dumps({registry: {"username": username, "password": password}})
        )
        msg_ctx.progress(20)

        msg_ctx.info("Starting Remote CodeBuild to deploy credentials")

        deploy.deploy_credentials(env_name=context.name, ciphertext=ciphertext)

        msg_ctx.info("Registry Credentials deployed")
        msg_ctx.progress(98)

        if cfn.does_stack_exist(stack_name=context.env_stack_name):
            context = ContextSerDe.load_context_from_ssm(env_name=manifest.name, type=Context)
            msg_ctx.info(f"Context updated: {filename}")
        msg_ctx.progress(100)
Example #12
def deploy_toolkit(
    context: "Context",
    username: Optional[str],
    password: Optional[str],
    msg_ctx: MessagesContext,
    top_level: str = "orbit",
) -> None:
    credential_received: bool = username is not None and password is not None
    stack_exist: bool = cfn.does_stack_exist(
        stack_name=context.toolkit.stack_name)
    credential_exist: bool = dockerhub.does_credential_exist(
        context=context) if stack_exist else False
    image_manifests = [
        cast(ImageManifest, getattr(context.images, i))
        for i in context.images.names
    ]
    credential_required: bool = any([
        im.get_source(account_id=context.account_id,
                      region=context.region) == "dockerhub"
        for im in image_manifests
    ])

    if stack_exist:
        if credential_required and not credential_exist and not credential_received:
            username, password = _request_dockerhub_credential(msg_ctx=msg_ctx)
            dockerhub.store_credential(context=context,
                                       username=username,
                                       password=password)
            credential_exist = True
        elif credential_received:
            dockerhub.store_credential(
                context=context,
                username=cast(str, username),
                password=cast(str, password),
            )
            credential_exist = True
    else:
        context.toolkit.deploy_id = "".join(
            random.choice(string.ascii_lowercase) for i in range(6))
        if credential_required and not credential_received:
            username, password = _request_dockerhub_credential(msg_ctx=msg_ctx)
            credential_exist = False

    msg_ctx.progress(6)
    _logger.debug("context.toolkit.deploy_id: %s", context.toolkit.deploy_id)
    template_filename: str = toolkit.synth(context=context,
                                           top_level=top_level)
    cfn.deploy_template(stack_name=context.toolkit.stack_name,
                        filename=template_filename,
                        env_tag=context.env_tag,
                        s3_bucket=None)
    ContextSerDe.fetch_toolkit_data(context=context)
    ContextSerDe.dump_context_to_ssm(context=context)

    if credential_exist is False:
        dockerhub.store_credential(
            context=context,
            username=cast(str, username),
            password=cast(str, password),
        )
Example #13
def deploy_env(context: Context) -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        repo_location = init_env_repo(context=context)
        repo = context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)
Example #14
def build_image(
    env: str,
    dir: Optional[str],
    name: str,
    script: Optional[str],
    build_args: Optional[List[str]],
    timeout: int = 30,
    debug: bool = False,
    source_registry: Optional[str] = None,
    source_repository: Optional[str] = None,
    source_version: Optional[str] = None,
) -> None:
    with MessagesContext("Deploying Docker Image", debug=debug) as msg_ctx:
        context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
        msg_ctx.info("Manifest loaded")
        if cfn.does_stack_exist(stack_name=f"orbit-{context.name}") is False:
            msg_ctx.error("Please, deploy your environment before deploying any additional docker image")
            return
        msg_ctx.progress(3)
        if dir:
            dirs = [(dir, name)]
        else:
            dirs = []
        bundle_path = bundle.generate_bundle(command_name=f"deploy_image-{name}", context=context, dirs=dirs)
        msg_ctx.progress(5)

        script_str = "NO_SCRIPT" if script is None else script
        source_str = "NO_REPO" if source_registry is None else f"{source_registry} {source_repository} {source_version}"
        build_args = [] if build_args is None else build_args
        buildspec = codebuild.generate_spec(
            context=context,
            plugins=False,
            cmds_build=[
                f"orbit remote --command build_image " f"{env} {name} {script_str} {source_str} {' '.join(build_args)}"
            ],
            changeset=None,
        )
        msg_ctx.progress(6)

        remote.run(
            command_name=f"deploy_image-{name}",
            context=context,
            bundle_path=bundle_path,
            buildspec=buildspec,
            codebuild_log_callback=msg_ctx.progress_bar_callback,
            timeout=timeout,
        )
        msg_ctx.info("Docker Image deploy into ECR")

        address = (
            f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}/{name}"
            if name in [n.replace("_", "-") for n in context.images.names]
            else f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}/users/{name}"
        )

        msg_ctx.info(f"ECR Image Address={address}")
        msg_ctx.tip(f"ECR Image Address: {stylize(address, underline=True)}")
        msg_ctx.progress(100)
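
A hedged invocation sketch for build_image above; every value is hypothetical, and build_args are forwarded verbatim to the remote build command:

build_image(
    env="dev-env",                         # hypothetical environment name
    dir="./custom-image",                  # local Docker context to bundle
    name="custom-image",
    script=None,                           # no pre-build script
    build_args=["BASE_IMAGE=python:3.8"],  # hypothetical build argument
    debug=False,
)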
Example #15
def destroy_remaining_resources(env_name: str,
                                top_level: str = "orbit") -> None:
    ecr.cleanup_remaining_repos(env_name=env_name)
    s3.delete_bucket_by_prefix(
        prefix=f"{top_level}-{env_name}-cdk-toolkit-{utils.get_account_id()}-")
    env_cdk_toolkit: str = f"{top_level}-{env_name}-cdk-toolkit"
    if cfn.does_stack_exist(stack_name=env_cdk_toolkit):
        cfn.destroy_stack(stack_name=env_cdk_toolkit)
    destroy_toolkit(env_name=env_name)
Example #16
def destroy_team(context: Context, team_context: TeamContext) -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        repo_location = init_team_repo(context=context, team_context=team_context)
        repo = team_context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)
        uninstall_chart(name=f"{team_context.name}-jupyter-hub")
def destroy_toolkit(env_name: str, top_level: str = "orbit") -> None:
    try:
        s3.delete_bucket_by_prefix(prefix=f"{top_level}-{env_name}-toolkit-{utils.get_account_id()}-")
    except Exception as ex:
        _logger.debug("Skipping Toolkit bucket deletion. Cause: %s", ex)
    toolkit_stack_name: str = f"{top_level}-{env_name}-toolkit"
    if cfn.does_stack_exist(stack_name=toolkit_stack_name):
        cfn.destroy_stack(stack_name=toolkit_stack_name)
    ssm.cleanup_env(env_name=env_name, top_level=top_level)
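
A hedged usage sketch for destroy_toolkit above; the environment name is hypothetical, and top_level selects the "orbit" (environment) or "orbit-foundation" namespace, matching the callers shown earlier:

destroy_toolkit(env_name="dev-env")                                # environment toolkit
destroy_toolkit(env_name="dev-env", top_level="orbit-foundation")  # foundation toolkit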
Example #18
def deploy_team(context: "Context", team_context: "TeamContext") -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        k8s_context = get_k8s_context(context=context)
        _logger.debug("kubectl context: %s", k8s_context)
        output_path = _generate_team_context(context=context,
                                             team_context=team_context)
        sh.run(
            f"kubectl apply -f {output_path} --context {k8s_context} --wait")
Example #19
def deploy_team(context: "Context", team_context: "TeamContext") -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        k8s_context = get_k8s_context(context=context)
        _logger.debug("kubectl context: %s", k8s_context)
        output_path = _generate_team_context(context=context, team_context=team_context)
        output_path = _generate_env_manifest(context=context, clean_up=False)
        sh.run(f"kubectl apply -f {output_path} --context {k8s_context} --wait")
        fetch_kubectl_data(context=context, k8s_context=k8s_context, include_teams=True)
def destroy_foundation(env: str, debug: bool) -> None:
    with MessagesContext("Destroying", debug=debug) as msg_ctx:
        ssm.cleanup_changeset(env_name=env, top_level="orbit-f")
        ssm.cleanup_manifest(env_name=env, top_level="orbit-f")

        if ssm.does_parameter_exist(name=f"/orbit-f/{env}/context") is False:
            msg_ctx.info(
                f"Foundation {env} not found. Destroying only possible remaining resources."
            )
            destroy_remaining_resources(env_name=env, top_level="orbit-f")
            msg_ctx.progress(100)
            return

        context: "FoundationContext" = ContextSerDe.load_context_from_ssm(
            env_name=env, type=FoundationContext)
        msg_ctx.info("Context loaded")
        msg_ctx.progress(25)

        if (cfn.does_stack_exist(stack_name=cast(str, context.stack_name))
                or cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
                or cfn.does_stack_exist(
                    stack_name=context.cdk_toolkit.stack_name)):
            destroy.destroy_foundation(env_name=context.name)

        msg_ctx.info("Foundation destroyed")
        msg_ctx.progress(75)

        try:
            _destroy_toolkit(
                env_name=context.name,
                top_level="orbit-f",
                cdk_toolkit_bucket=context.cdk_toolkit.s3_bucket,
            )
        except botocore.exceptions.ClientError as ex:
            error = ex.response["Error"]
            if "does not exist" not in error["Message"]:
                raise
            _logger.debug(f"Skipping toolkit destroy: {error['Message']}")
        msg_ctx.info("Toolkit destroyed")
        ssm.cleanup_env(env_name=context.name, top_level="orbit-f")

        msg_ctx.progress(100)
def deploy_env(
    filename: str,
    debug: bool,
) -> None:
    with MessagesContext("Deploying", debug=debug) as msg_ctx:
        msg_ctx.progress(2)

        manifest: "Manifest" = ManifestSerDe.load_manifest_from_file(filename=filename, type=Manifest)
        msg_ctx.info(f"Manifest loaded: {filename}")
        msg_ctx.progress(5)

        manifest_dir: str = os.path.dirname(os.path.abspath(filename))
        _logger.debug("manifest directory is set to %s", manifest_dir)

        manifest_validations(manifest)

        context: "Context" = ContextSerDe.load_context_from_manifest(manifest=manifest)

        msg_ctx.info("Current Context loaded")
        msg_ctx.progress(10)

        _logger.debug("Inspecting possible manifest changes...")
        changeset: "Changeset" = extract_changeset(manifest=manifest, context=context, msg_ctx=msg_ctx)
        _logger.debug(f"Changeset:\n{dump_changeset_to_str(changeset=changeset)}")
        msg_ctx.progress(15)

        _deploy_toolkit(
            context=context,
        )
        msg_ctx.info("Toolkit deployed")
        msg_ctx.progress(30)

        deploy.deploy_env(
            env_name=context.name,
            manifest_dir=manifest_dir,
        )

        msg_ctx.info("Orbit Workbench deployed")
        msg_ctx.progress(98)

        if cfn.does_stack_exist(stack_name=context.env_stack_name):
            context = ContextSerDe.load_context_from_manifest(manifest=manifest)
            msg_ctx.info(f"Context updated: {filename}")
        msg_ctx.progress(99)

        if context.cognito_users_url:
            msg_ctx.tip(f"Add users: {stylize(context.cognito_users_url, underline=True)}")
        else:
            RuntimeError("Cognito Users URL not found.")
        if context.landing_page_url:
            msg_ctx.tip(f"Access Orbit Workbench: {stylize(f'{context.landing_page_url}/orbit/login', underline=True)}")
        else:
            RuntimeError("Landing Page URL not found.")
        msg_ctx.progress(100)
Example #22
def _deploy_image(
    env: str,
    dir: str,
    name: str,
    script: Optional[str],
    build_args: Optional[List[str]],
    region: Optional[str],
    debug: bool,
) -> None:
    with MessagesContext("Deploying Docker Image", debug=debug) as msg_ctx:
        context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env,
                                                                type=Context)

        if cfn.does_stack_exist(stack_name=f"orbit-{context.name}") is False:
            msg_ctx.error(
                "Please, deploy your environment before deploy any addicional docker image"
            )
            return

        plugins.PLUGINS_REGISTRIES.load_plugins(
            context=context,
            msg_ctx=msg_ctx,
            plugin_changesets=[],
            teams_changeset=None,
        )
        msg_ctx.progress(3)

        bundle_path = bundle.generate_bundle(
            command_name=f"deploy_image-{name}",
            context=context,
            dirs=[(dir, name)])
        msg_ctx.progress(4)
        script_str = "NO_SCRIPT" if script is None else script
        build_args = [] if build_args is None else build_args
        buildspec = codebuild.generate_spec(
            context=context,
            plugins=True,
            cmds_build=[
                f"orbit remote --command _deploy_image {env} {name} {dir} {script_str} {' '.join(build_args)}"
            ],
            changeset=None,
        )
        remote.run(
            command_name=f"deploy_image-{name}",
            context=context,
            bundle_path=bundle_path,
            buildspec=buildspec,
            codebuild_log_callback=msg_ctx.progress_bar_callback,
            timeout=30,
        )
        msg_ctx.info("Docker Image deploy into ECR")
        address = f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}-{name}"
        msg_ctx.tip(f"ECR Image Address: {stylize(address, underline=True)}")
        msg_ctx.progress(100)
Example #23
def destroy_env(context: "Context") -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    if cfn.does_stack_exist(stack_name=final_eks_stack_name):
        revoke_cluster_pod_security_group(context=context)

        sh.run(f"eksctl utils write-kubeconfig --cluster orbit-{context.name} --set-kubeconfig-context")
        output_filename = generate_manifest(context=context, name=stack_name, nodegroups=context.managed_nodegroups)
        sh.run(f"eksctl delete cluster -f {output_filename} --wait --verbose 4")
        _logger.debug("EKSCTL Envrionment destroyed")
Example #24
def destroy(context: "FoundationContext") -> None:
    if cfn.does_stack_exist(stack_name=cast(str, context.stack_name)):
        cleanup.foundation_remaining_dependencies(context=context)
        cleanup.delete_cert_from_iam(context=context)
        _logger.debug("Destroying Foundation...")
        cdk.destroy(
            context=context,
            stack_name=cast(str, context.stack_name),
            app_filename=os.path.join(ORBIT_CLI_ROOT, "remote_files", "cdk", "foundation.py"),
            args=[context.name],
        )
Example #25
def destroy_team(context: Context, team_context: TeamContext) -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name) and team_context.team_helm_repository:
        kubectl.write_kubeconfig(context=context)
        uninstall_all_charts(namespace=team_context.name)
        if team_context.team_helm_repository:
            # delete helm charts for team repo
            sh.run(f"aws s3 rm --recursive {team_context.team_helm_repository}")
        if team_context.user_helm_repository:
            # delete helm charts for user repo
            sh.run(f"aws s3 rm --recursive {team_context.user_helm_repository}")
def deploy_teams(
    filename: str,
    debug: bool,
) -> None:
    with MessagesContext("Deploying", debug=debug) as msg_ctx:
        msg_ctx.progress(2)

        manifest: "Manifest" = ManifestSerDe.load_manifest_from_file(filename=filename, type=Manifest)
        msg_ctx.info(f"Manifest loaded: {filename}")
        msg_ctx.info(f"Teams: {','.join([t.name for t in manifest.teams])}")
        msg_ctx.progress(5)

        manifest_dir: str = os.path.dirname(os.path.abspath(filename))
        _logger.debug("manifest directory is set to %s", manifest_dir)

        context_parameter_name: str = f"/orbit/{manifest.name}/context"
        if not ssm.does_parameter_exist(name=context_parameter_name):
            msg_ctx.error(f"Orbit Environment {manifest.name} cannot be found in the current account and region.")
            return

        context: "Context" = ContextSerDe.load_context_from_manifest(manifest=manifest)
        msg_ctx.info("Current Context loaded")
        msg_ctx.info(f"Teams: {','.join([t.name for t in context.teams])}")
        msg_ctx.progress(10)

        _logger.debug("Inspecting possible manifest changes...")
        changeset: "Changeset" = extract_changeset(manifest=manifest, context=context, msg_ctx=msg_ctx)
        _logger.debug(f"Changeset:\n{dump_changeset_to_str(changeset=changeset)}")

        msg_ctx.progress(30)

        deploy.deploy_teams(
            env_name=context.name,
            manifest_dir=manifest_dir,
        )

        msg_ctx.info("Orbit Workbench deployed")
        msg_ctx.progress(98)

        if cfn.does_stack_exist(stack_name=context.env_stack_name):
            context = ContextSerDe.load_context_from_ssm(env_name=manifest.name, type=Context)
            msg_ctx.info(f"Context updated: {filename}")
        msg_ctx.progress(99)

        if context.user_pool_id:
            cognito_users_url = orbit_cognito.get_users_url(user_pool_id=context.user_pool_id, region=context.region)
            msg_ctx.tip(f"Add users: {stylize(cognito_users_url, underline=True)}")

        if context.landing_page_url:
            msg_ctx.tip(f"Access Orbit Workbench: {stylize(f'{context.landing_page_url}/orbit/login', underline=True)}")
        else:
            raise RuntimeError("Landing Page URL not found.")
        msg_ctx.progress(100)
def destroy_team(context: "Context", team_context: "TeamContext") -> None:
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        k8s_context = get_k8s_context(context=context)
        _logger.debug("kubectl k8s_context: %s", k8s_context)
        _logger.debug("Attempting kubectl delete for team %s",
                      team_context.name)
        output_path = _generate_team_context(context=context,
                                             team_context=team_context)
        sh.run(f"kubectl delete -f {output_path} --grace-period=0 --force "
               f"--ignore-not-found --wait --context {k8s_context}")
Example #28
def destroy(context: "Context") -> None:
    _logger.debug("Stack name: %s", context.env_stack_name)
    if cfn.does_stack_exist(stack_name=context.env_stack_name):
        docker.login(context=context)
        _logger.debug("DockerHub and ECR Logged in")
        ecr.cleanup_remaining_repos(env_name=context.name)
        args = [context.name]
        cdk.destroy(
            context=context,
            stack_name=context.env_stack_name,
            app_filename=os.path.join(ORBIT_CLI_ROOT, "remote_files", "cdk", "env.py"),
            args=args,
        )
        ssm.cleanup_context(env_name=context.name)
def destroy(context: T) -> None:
    if not (isinstance(context, Context)
            or isinstance(context, FoundationContext)):
        raise ValueError("Unknown 'context' Type")
    _logger.debug("Destroying %s CDK Toolkit...",
                  context.cdk_toolkit.stack_name)
    if context.cdk_toolkit.s3_bucket:
        if cfn.does_stack_exist(stack_name=context.cdk_toolkit.stack_name):
            try:
                s3.delete_bucket(bucket=context.cdk_toolkit.s3_bucket)
            except Exception as ex:
                _logger.debug("Skipping Toolkit bucket deletion. Cause: %s",
                              ex)
            cfn.destroy_stack(stack_name=context.cdk_toolkit.stack_name)
def _destroy_toolkit(
    env_name: str,
    top_level: str = "orbit",
    cdk_toolkit_bucket: Optional[str] = None,
) -> None:
    try:
        if cdk_toolkit_bucket:
            s3.delete_bucket(bucket=cdk_toolkit_bucket)
    except Exception as ex:
        _logger.debug("Skipping CDK Toolkit bucket deletion. Cause: %s", ex)
    toolkit_stack_name: str = f"{top_level}-{env_name}-toolkit"
    if cfn.does_stack_exist(stack_name=toolkit_stack_name):
        cfn.destroy_stack(stack_name=toolkit_stack_name)
    ssm.cleanup_env(env_name=env_name, top_level=top_level)
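
A hedged usage sketch for _destroy_toolkit above; unlike the prefix-based destroy_toolkit shown earlier, the CDK Toolkit bucket is passed explicitly (all values below are hypothetical):

_destroy_toolkit(
    env_name="dev-env",
    top_level="orbit-f",
    cdk_toolkit_bucket="orbit-f-dev-env-cdk-toolkit-111111111111-deadbeef",  # hypothetical bucket
)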