def destroy_kubeflow(context: Context) -> None:
    """Tear down the Kubeflow installation for the given Orbit environment.

    If the eksctl cluster stack exists and the ``kubeflow`` namespace is
    present, regenerate the Kubeflow config under ``.orbit.out`` and run
    ``delete_kf.sh``, retrying up to 3 times on shell failure.

    Args:
        context: Orbit environment context (provides the env name).
    """
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    if cfn.does_stack_exist(stack_name=final_eks_stack_name):
        kubectl.write_kubeconfig(context=context)
        for line in sh.run_iterating("kubectl get namespace kubeflow"):
            if '"kubeflow" not found' in line:
                # Namespace absent: nothing to destroy.
                return
        cluster_name = f"orbit-{context.name}"
        output_path = os.path.join(".orbit.out", context.name, "kubeflow", cluster_name)
        gen_kubeflow_config(context, output_path, cluster_name)
        _logger.debug("Destroying Kubeflow")
        output_path = os.path.abspath(output_path)
        _logger.debug(f"kubeflow config dir: {output_path}")
        utils.print_dir(output_path)
        timeouts = 0
        while timeouts < 3:
            try:
                _logger.info("Deleting kubeflow resources")
                sh.run("./delete_kf.sh", cwd=output_path)
            except FailedShellCommand:
                _logger.info("The command returned a non-zero exit code. Retrying to delete resources")
                timeouts += 1
                # Wait 5 minutes before retrying; AWS resource deletion
                # (load balancers, ENIs) often needs time to settle.
                time.sleep(300)
            else:
                # BUG FIX: the original loop had no exit on success, so a
                # successful delete_kf.sh run was re-executed indefinitely.
                break
def get_user_pv(fs_name: str, plugin_id: str, context: "Context", team_context: "TeamContext", vars: Dict[str, Optional[str]]) -> None:
    """Wait for the team's FSx PVC to bind and record its PV's CSI details.

    Polls the PVC ``fs_name`` in the team namespace up to 15 times, one
    minute apart, until ``spec.volumeName`` is populated; then copies the
    bound PV's CSI volume attributes (dnsname, mountname, provisioner
    identity, volume handle) into ``vars`` in place.

    Args:
        fs_name: Name of the PersistentVolumeClaim to poll.
        plugin_id: Plugin identifier, used only in the failure message.
        context: Orbit environment context (used to refresh kubeconfig).
        team_context: Team context; its name is the PVC's namespace.
        vars: Output mapping mutated in place with the PV details.

    Raises:
        Exception: if the volume is still not bound after all retries.
    """
    # NOTE(review): `vars` shadows the builtin, but renaming it would change
    # the keyword interface seen by callers, so it is kept as-is.
    for i in range(0, 15):
        run_command(f"kubectl get pvc -n {team_context.name} {fs_name} -o json > /tmp/pvc.json")
        with open("/tmp/pvc.json", "r") as f:
            pvc = json.load(f)
        # The PVC is bound once spec.volumeName is set to a non-empty value.
        if "spec" in pvc and "volumeName" in pvc["spec"] and pvc["spec"]["volumeName"]:
            volumeName = pvc["spec"]["volumeName"]
            run_command(f"kubectl get pv {volumeName} -o json > /tmp/pv.json")
            with open("/tmp/pv.json", "r") as f:
                team_pv = json.load(f)
            _logger.debug("team pv: %s", json.dumps(team_pv, sort_keys=True, indent=4))
            if "spec" in team_pv:
                vars["dnsname"] = team_pv["spec"]["csi"]["volumeAttributes"]["dnsname"]
                vars["mountname"] = team_pv["spec"]["csi"]["volumeAttributes"]["mountname"]
                vars["csiProvisionerIdentity"] = team_pv["spec"]["csi"]["volumeAttributes"]["storage.kubernetes.io/csiProvisionerIdentity"]
                vars["volumeHandle"] = team_pv["spec"]["csi"]["volumeHandle"]
            _logger.info(f"FSX Volume is {volumeName}")
            break
        _logger.info("FSX Volume not ready. Waiting a min")
        time.sleep(60)
        # Refresh kubeconfig between polls in case credentials expired
        # during the wait.
        kubectl.write_kubeconfig(context=context)
    else:
        # All 15 attempts exhausted without the PVC binding.
        raise Exception(f"FSX Volume is not ready for plugin {plugin_id}")
def destroy_teams(args: Tuple[str, ...]) -> None:
    """Destroy every team of the environment named by ``args[0]``.

    Tears down, in order: team plugins, team Helm charts, Kubernetes team
    components, EKS team stacks, team CloudFormation stacks, and finally
    the teams' SSM entries.
    """
    _logger.debug("args %s", args)
    env_name: str = args[0]
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("context.name %s", context.name)
    plugins.PLUGINS_REGISTRIES.load_plugins(context=context, plugin_changesets=[], teams_changeset=None)
    kubectl.write_kubeconfig(context=context)
    _logger.debug("Plugins loaded")
    for team in context.teams:
        plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team)
    _logger.debug("Plugins destroyed")
    for team in context.teams:
        helm.destroy_team(context=context, team_context=team)
    _logger.debug("Helm Charts uninstalled")
    kubectl.destroy_teams(context=context)
    _logger.debug("Kubernetes Team components destroyed")
    eksctl.destroy_teams(context=context)
    _logger.debug("EKS Team Stacks destroyed")
    teams.destroy_all(context=context)
    _logger.debug("Teams Stacks destroyed")
    ssm.cleanup_teams(env_name=context.name)
def deploy_env(context: Context) -> None:
    """Install the environment-level Helm charts into the EKS cluster.

    Installs the landing-page chart always, and the image-replicator chart
    when explicitly enabled or when the cluster has no internet access.
    No-op when the eksctl cluster stack does not exist.

    Args:
        context: Orbit environment context supplying chart values.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        # Register the environment's Helm repository and target the cluster.
        repo_location = init_env_repo(context=context)
        repo = context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)
        chart_name, chart_version, chart_package = package_chart(
            repo=repo,
            chart_path=os.path.join(CHARTS_PATH, "env", "landing-page"),
            values={
                "region": context.region,
                "env_name": context.name,
                "user_pool_id": context.user_pool_id,
                "user_pool_client_id": context.user_pool_client_id,
                "identity_pool_id": context.identity_pool_id,
                "ssl_cert_arn": context.networking.frontend.ssl_cert_arn,
                "repository": context.images.landing_page.repository,
                "tag": context.images.landing_page.version,
                "cognito_external_provider": context.cognito_external_provider,
                # Fall back to the provider name when no display label is set.
                "cognito_external_provider_label": context.cognito_external_provider
                if context.cognito_external_provider_label is None
                else context.cognito_external_provider_label,
                # The literal string "null" is what the chart expects for
                # an unset optional value.
                "cognito_external_provider_domain": "null"
                if context.cognito_external_provider_domain is None
                else context.cognito_external_provider_domain,
                "cognito_external_provider_redirect": "null"
                if context.cognito_external_provider_redirect is None
                else context.cognito_external_provider_redirect,
                # Quoted booleans: the chart consumes these as strings.
                "internal_load_balancer": '"false"'
                if context.networking.frontend.load_balancers_subnets
                else '"true"',
                # Dev builds always re-pull so freshly-pushed images are used.
                "image_pull_policy": "Always"
                if aws_orbit.__version__.endswith(".dev0")
                else "IfNotPresent",
            },
        )
        install_chart(
            repo=repo, namespace="env", name="landing-page", chart_name=chart_name, chart_version=chart_version
        )
        # The image replicator mirrors images into the account; it is needed
        # whenever the data plane cannot reach the internet, or on request.
        if context.install_image_replicator or not context.networking.data.internet_accessible:
            chart_name, chart_version, chart_package = package_chart(
                repo=repo,
                chart_path=os.path.join(CHARTS_PATH, "env", "image-replicator"),
                values={
                    "region": context.region,
                    "account_id": context.account_id,
                    "env_name": context.name,
                    "repository": context.images.image_replicator.repository,
                    "tag": context.images.image_replicator.version,
                    # Regional STS endpoint is required in isolated networks.
                    "sts_ep": "legacy" if context.networking.data.internet_accessible else "regional",
                    "image_pull_policy": "Always"
                    if aws_orbit.__version__.endswith(".dev0")
                    else "IfNotPresent",
                },
            )
            install_chart(
                repo=repo,
                namespace="kube-system",
                name="image-replicator",
                chart_name=chart_name,
                chart_version=chart_version,
            )
def deploy_team(context: Context, team_context: TeamContext) -> None:
    """Package and install the team's JupyterHub Helm chart.

    No-op when the eksctl cluster stack does not exist.

    Args:
        context: Orbit environment context supplying shared chart values.
        team_context: Team whose JupyterHub deployment is being installed.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name):
        repo_location = init_team_repo(context=context, team_context=team_context)
        repo = team_context.name
        add_repo(repo=repo, repo_location=repo_location)
        kubectl.write_kubeconfig(context=context)
        # Work on a per-team copy of the shared chart templates.
        team_charts_path = create_team_charts_copy(team_context=team_context, path=os.path.join(CHARTS_PATH, "team"))
        chart_name, chart_version, chart_package = package_chart(
            repo=repo,
            chart_path=os.path.join(team_charts_path, "jupyter-hub"),
            values={
                "team": team_context.name,
                "efsid": context.shared_efs_fs_id,
                "region": context.region,
                "ssl_cert_arn": context.networking.frontend.ssl_cert_arn,
                "env_name": context.name,
                "jupyter_hub_repository": context.images.jupyter_hub.repository,
                "jupyter_hub_tag": context.images.jupyter_hub.version,
                "jupyter_user_repository": context.images.jupyter_user.repository,
                "jupyter_user_tag": context.images.jupyter_user.version,
                # Quoted booleans: the chart consumes these as strings.
                "grant_sudo": '"yes"' if team_context.grant_sudo else '"no"',
                "internal_load_balancer": '"false"'
                if context.networking.frontend.load_balancers_subnets
                else '"true"',
                # Default inbound range is the cluster DNS CIDR when the
                # team does not specify its own ranges.
                "jupyterhub_inbound_ranges": str(
                    team_context.jupyterhub_inbound_ranges
                    if team_context.jupyterhub_inbound_ranges
                    else [utils.get_dns_ip_cidr(context=context)]
                ),
                # Regional STS endpoint is required in isolated networks.
                "sts_ep": "legacy" if context.networking.data.internet_accessible else "regional",
                # Dev builds always re-pull so freshly-pushed images are used.
                "image_pull_policy": "Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
            },
        )
        install_chart(
            repo=repo,
            namespace=team_context.name,
            name=f"{team_context.name}-jupyter-hub",
            chart_name=chart_name,
            chart_version=chart_version,
        )
def deploy_env(context: Context) -> None:
    """Register the environment's Helm repository and refresh kubeconfig.

    Does nothing when the eksctl cluster stack is absent.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if not cfn.does_stack_exist(stack_name=eks_stack_name):
        return
    repo_location = init_env_repo(context=context)
    repo = context.name
    add_repo(repo=repo, repo_location=repo_location)
    kubectl.write_kubeconfig(context=context)
def destroy_team(context: Context, team_context: TeamContext) -> None:
    """Uninstall the team's JupyterHub Helm chart.

    Does nothing when the eksctl cluster stack is absent.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if not cfn.does_stack_exist(stack_name=eks_stack_name):
        return
    repo_location = init_team_repo(context=context, team_context=team_context)
    repo = team_context.name
    add_repo(repo=repo, repo_location=repo_location)
    kubectl.write_kubeconfig(context=context)
    uninstall_chart(name=f"{team_context.name}-jupyter-hub")
def destroy_team(context: Context, team_context: TeamContext) -> None:
    """Uninstall all of the team's Helm charts and delete the S3-hosted
    team and user chart repositories.

    Chart removal runs only when both the eksctl cluster stack and the
    team's Helm repository exist.

    Args:
        context: Orbit environment context.
        team_context: Team whose charts and repositories are removed.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if cfn.does_stack_exist(stack_name=eks_stack_name) and team_context.team_helm_repository:
        kubectl.write_kubeconfig(context=context)
        uninstall_all_charts(namespace=team_context.name)
        # Delete helm charts for the team repo. (FIX: the original re-checked
        # team_helm_repository here, but the outer guard already guarantees
        # it is set — the redundant condition is removed.)
        sh.run(f"aws s3 rm --recursive {team_context.team_helm_repository}")
        if team_context.user_helm_repository:
            # Delete helm charts for the user repo.
            sh.run(f"aws s3 rm --recursive {team_context.user_helm_repository}")
def deploy_team(context: Context, team_context: TeamContext) -> None:
    """Package the team-space and user-space Helm chart bundles for a team.

    Does nothing when the eksctl cluster stack is absent.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if not cfn.does_stack_exist(stack_name=eks_stack_name):
        return
    # Team-space packages.
    team_repo = team_context.name
    add_repo(repo=team_repo, repo_location=_init_team_repo(context=context, team_context=team_context))
    kubectl.write_kubeconfig(context=context)
    team_charts_path = create_team_charts_copy(team_context=team_context, path=os.path.join(CHARTS_PATH, "team"))
    package_team_space_pkg(context, team_repo, team_charts_path, team_context)
    # User-space packages: everything that should be installed into a new
    # user namespace for this team.
    user_repo = team_context.name + "--users"
    add_repo(repo=user_repo, repo_location=init_user_repo(context=context, team_context=team_context))
    user_charts_path = create_team_charts_copy(team_context=team_context, path=os.path.join(CHARTS_PATH, "user"))
    package_user_space_pkg(context, user_repo, user_charts_path, team_context)
def destroy_teams(env_name: str) -> None:
    """Destroy all teams of the environment ``env_name``, including their
    user namespaces, plugins, Helm charts, Kubernetes components, EKS and
    CloudFormation stacks, and SSM entries.

    Args:
        env_name: Name of the Orbit environment whose teams are destroyed.
    """
    # BUG FIX: `context` was referenced throughout but never defined, which
    # raised NameError on the first use. Load it from SSM, matching the
    # sibling destroy_teams entry point.
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    plugins.PLUGINS_REGISTRIES.load_plugins(context=context, plugin_changesets=[], teams_changeset=None)
    kubectl.write_kubeconfig(context=context)
    for team_context in context.teams:
        destroy_team_user_resources(team_context.name)
    # Give user-resource deletions time to settle before tearing down teams.
    time.sleep(60)
    _logger.debug("Plugins loaded")
    for team_context in context.teams:
        plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
    _logger.debug("Plugins destroyed")
    for team_context in context.teams:
        helm.destroy_team(context=context, team_context=team_context)
    _logger.debug("Helm Charts uninstalled")
    kubectl.destroy_teams(context=context)
    _logger.debug("Kubernetes Team components destroyed")
    eksctl.destroy_teams(context=context)
    _logger.debug("EKS Team Stacks destroyed")
    teams.destroy_all(context=context)
    _logger.debug("Teams Stacks destroyed")
    ssm.cleanup_teams(env_name=context.name)
def deploy_teams(args: Tuple[str, ...]) -> None:
    """Deploy all teams of an environment, destroying removed teams first.

    Loads the environment context and optional changeset from SSM, tears
    down any teams the changeset marks as removed, then deploys (stacks,
    EKS, Kubernetes components, Helm charts, plugins) every remaining and
    newly added team, and finally refreshes kubectl data.

    Args:
        args: Exactly one element — the environment name.

    Raises:
        ValueError: if ``args`` does not hold exactly one value.
        RuntimeError: if a required manifest, team manifest, or team
            context cannot be found.
    """
    _logger.debug("args: %s", args)
    if len(args) == 1:
        env_name: str = args[0]
    else:
        raise ValueError("Unexpected number of values in args")
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("Context loaded.")
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)
    _logger.debug("Changeset loaded.")
    if changeset:
        plugins.PLUGINS_REGISTRIES.load_plugins(
            context=context, plugin_changesets=changeset.plugin_changesets, teams_changeset=changeset.teams_changeset
        )
        _logger.debug("Plugins loaded")
    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")
    # Tear down every team the changeset marks as removed before deploying.
    if changeset and changeset.teams_changeset and changeset.teams_changeset.removed_teams_names:
        kubectl.write_kubeconfig(context=context)
        for team_name in changeset.teams_changeset.removed_teams_names:
            team_context: Optional["TeamContext"] = context.get_team_by_name(name=team_name)
            if team_context is None:
                raise RuntimeError(f"TeamContext {team_name} not found!")
            _logger.debug("Destroying team %s", team_name)
            plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
            _logger.debug("Team Plugins destroyed")
            helm.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team Helm Charts uninstalled")
            kubectl.destroy_team(context=context, team_context=team_context)
            _logger.debug("Kubernetes Team components destroyed")
            eksctl.destroy_team(context=context, team_context=team_context)
            _logger.debug("EKS Team Stack destroyed")
            teams.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team %s destroyed", team_name)
            # Persist the removal immediately so a failure mid-loop does not
            # leave SSM referencing an already-destroyed team.
            context.remove_team_by_name(name=team_name)
            ContextSerDe.dump_context_to_ssm(context=context)
    # Deploy the surviving teams plus any newly added ones.
    team_names = [t.name for t in context.teams]
    if changeset and changeset.teams_changeset and changeset.teams_changeset.added_teams_names:
        team_names.extend(changeset.teams_changeset.added_teams_names)
    manifest: Optional["Manifest"] = ManifestSerDe.load_manifest_from_ssm(env_name=context.name, type=Manifest)
    if manifest is None:
        raise RuntimeError(f"Manifest {context.name} not found!")
    kubectl.write_kubeconfig(context=context)
    for team_name in team_names:
        team_manifest = manifest.get_team_by_name(name=team_name)
        if team_manifest is None:
            raise RuntimeError(f"TeamManifest {team_name} not found!")
        teams.deploy_team(context=context, manifest=manifest, team_manifest=team_manifest)
        _logger.debug("Team Stacks deployed")
        team_context = context.get_team_by_name(name=team_name)
        if team_context is None:
            raise RuntimeError(f"TeamContext {team_name} not found!")
        eksctl.deploy_team(context=context, team_context=team_context)
        _logger.debug("EKS Team Stack deployed")
        kubectl.deploy_team(context=context, team_context=team_context)
        _logger.debug("Kubernetes Team components deployed")
        helm.deploy_team(context=context, team_context=team_context)
        _logger.debug("Team Helm Charts installed")
        plugins.PLUGINS_REGISTRIES.deploy_team_plugins(
            context=context, team_context=team_context, changes=changeset.plugin_changesets if changeset else []
        )
        team_context.plugins = team_manifest.plugins
        # Persist per-team progress after each successful deployment.
        ContextSerDe.dump_context_to_ssm(context=context)
        _logger.debug("Team Plugins deployed")
    k8s_context = utils.get_k8s_context(context=context)
    kubectl.fetch_kubectl_data(context=context, k8s_context=k8s_context, include_teams=True)
    _logger.debug("Teams deployed")
def deploy_teams(env_name: str, manifest_dir: str) -> None:
    """Deploy all teams of environment ``env_name``, destroying removed
    teams (including their user namespaces) first.

    Args:
        env_name: Name of the Orbit environment.
        manifest_dir: Manifest directory; currently unused in this body —
            retained for interface compatibility (TODO confirm with callers).

    Raises:
        RuntimeError: if a required manifest, team manifest, or team
            context cannot be found.
    """
    # BUG FIX: `context` and `changeset` were referenced without ever being
    # defined (NameError on first use). Load both from SSM, matching the
    # sibling deploy_teams entry point.
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("Context loaded.")
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)
    _logger.debug("Changeset loaded.")
    if changeset:
        plugins.PLUGINS_REGISTRIES.load_plugins(
            context=context,
            plugin_changesets=changeset.plugin_changesets,
            teams_changeset=changeset.teams_changeset,
        )
        _logger.debug("Plugins loaded")
    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")
    # Tear down every team the changeset marks as removed before deploying.
    if changeset and changeset.teams_changeset and changeset.teams_changeset.removed_teams_names:
        kubectl.write_kubeconfig(context=context)
        for team_name in changeset.teams_changeset.removed_teams_names:
            team_context: Optional["TeamContext"] = context.get_team_by_name(name=team_name)
            if team_context is None:
                raise RuntimeError(f"TeamContext {team_name} not found!")
            # FIX: corrected "Destory" typo in the log message.
            _logger.debug("Destroy all user namespaces for %s", team_context.name)
            sh.run(f"kubectl delete namespaces -l orbit/team={team_context.name},orbit/space=user --wait=true")
            _logger.debug("Destroying team %s", team_name)
            plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
            _logger.debug("Team Plugins destroyed")
            helm.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team Helm Charts uninstalled")
            kubectl.destroy_team(context=context, team_context=team_context)
            _logger.debug("Kubernetes Team components destroyed")
            eksctl.destroy_team(context=context, team_context=team_context)
            _logger.debug("EKS Team Stack destroyed")
            teams.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team %s destroyed", team_name)
            # Persist the removal immediately so a failure mid-loop does not
            # leave SSM referencing an already-destroyed team.
            context.remove_team_by_name(name=team_name)
            ContextSerDe.dump_context_to_ssm(context=context)
    # Deploy the surviving teams plus any newly added ones.
    team_names = [t.name for t in context.teams]
    if changeset and changeset.teams_changeset and changeset.teams_changeset.added_teams_names:
        team_names.extend(changeset.teams_changeset.added_teams_names)
    manifest: Optional["Manifest"] = ManifestSerDe.load_manifest_from_ssm(env_name=context.name, type=Manifest)
    if manifest is None:
        raise RuntimeError(f"Manifest {context.name} not found!")
    kubectl.write_kubeconfig(context=context)
    for team_name in team_names:
        team_manifest = manifest.get_team_by_name(name=team_name)
        if team_manifest is None:
            raise RuntimeError(f"TeamManifest {team_name} not found!")
        teams.deploy_team(context=context, manifest=manifest, team_manifest=team_manifest)
        _logger.debug("Team Stacks deployed")
        team_context = context.get_team_by_name(name=team_name)
        if team_context is None:
            raise RuntimeError(f"TeamContext {team_name} not found!")
        eksctl.deploy_team(context=context, team_context=team_context)
        _logger.debug("EKS Team Stack deployed")
        kubectl.deploy_team(context=context, team_context=team_context)
        _logger.debug("Kubernetes Team components deployed")
        helm.deploy_team(context=context, team_context=team_context)
        _logger.debug("Team Helm Charts installed")
        plugins.PLUGINS_REGISTRIES.deploy_team_plugins(
            context=context, team_context=team_context, changes=changeset.plugin_changesets if changeset else []
        )
        team_context.plugins = team_manifest.plugins
        # Persist per-team progress after each successful deployment.
        ContextSerDe.dump_context_to_ssm(context=context)
        _logger.debug("Team Plugins deployed")
    _logger.debug("Teams deployed")
def destroy_env(context: Context) -> None:
    """Refresh kubeconfig ahead of environment teardown.

    Only acts when the eksctl cluster stack exists and the environment has
    a Helm repository configured.
    """
    eks_stack_name: str = f"eksctl-orbit-{context.name}-cluster"
    _logger.debug("EKSCTL stack name: %s", eks_stack_name)
    if not (cfn.does_stack_exist(stack_name=eks_stack_name) and context.helm_repository):
        return
    kubectl.write_kubeconfig(context=context)