# Standard-library imports used by the snippets below. Project-internal helpers
# (sh, cfn, iam, eks, k8s, eksctl, kubectl, helm, docker, cdk, cdk_toolkit, env,
# teams, toolkit, dockerhub, plugins, utils, autoscaling, bundle, codebuild) and
# the manifest/context/changeset types are assumed to be imported from the
# surrounding package.
import concurrent.futures
import logging
import os
import random
import string
from concurrent.futures import Future
from typing import Any, Dict, List, Optional, Tuple, cast

_logger: logging.Logger = logging.getLogger(__name__)


def map_iam_identities(
    context: Context, cluster_name: str, eks_system_masters_roles_changes: Optional[ListChangeset]
) -> None:
    if eks_system_masters_roles_changes and eks_system_masters_roles_changes.added_values:
        for role in eks_system_masters_roles_changes.added_values:
            if iam.get_role(role) is None:
                _logger.debug(f"Skipping nonexistent IAM Role: {role}")
                continue

            arn = f"arn:aws:iam::{context.account_id}:role/{role}"
            for line in sh.run_iterating(f"eksctl get iamidentitymapping --cluster {cluster_name} --arn {arn}"):
                if line.startswith("Error: no iamidentitymapping with arn"):
                    _logger.debug(
                        f"Adding IAM Identity Mapping - Role: {arn}, Username: {role}, Group: system:masters"
                    )
                    sh.run(
                        f"eksctl create iamidentitymapping --cluster {cluster_name} --arn {arn} "
                        f"--username {role} --group system:masters"
                    )
                    cast(List[str], context.eks_system_masters_roles).append(role)
                    ContextSerDe.dump_context_to_ssm(context=context)
                    break
            else:
                _logger.debug(f"Skip adding existing IAM Identity Mapping - Role: {arn}")

    if eks_system_masters_roles_changes and eks_system_masters_roles_changes.removed_values:
        for role in eks_system_masters_roles_changes.removed_values:
            arn = f"arn:aws:iam::{context.account_id}:role/{role}"
            for line in sh.run_iterating(f"eksctl get iamidentitymapping --cluster {cluster_name} --arn {arn}"):
                if line.startswith("Error: no iamidentitymapping with arn"):
                    _logger.debug(f"Skip removing nonexistent IAM Identity Mapping - Role: {arn}")
                    break
            else:
                _logger.debug(f"Removing IAM Identity Mapping - Role: {arn}")
                sh.run(f"eksctl delete iamidentitymapping --cluster {cluster_name} --arn {arn} --all")
                cast(List[str], context.eks_system_masters_roles).remove(role)
                ContextSerDe.dump_context_to_ssm(context=context)
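
# map_iam_identities relies on Python's for/else: the else branch runs only when
# the loop finishes without hitting `break`, i.e. when no eksctl error line was
# seen. A minimal, self-contained sketch of the same detection pattern
# (hypothetical helper over already-captured output lines, illustrative only):
def _mapping_exists_sketch(output_lines: List[str]) -> bool:
    for line in output_lines:
        if line.startswith("Error: no iamidentitymapping with arn"):
            return False  # eksctl reported no mapping for this ARN
    return True  # no error line seen: the mapping already exists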

def deploy_toolkit(
    context: "Context",
    username: Optional[str],
    password: Optional[str],
    msg_ctx: MessagesContext,
    top_level: str = "orbit",
) -> None:
    credential_received: bool = username is not None and password is not None
    stack_exist: bool = cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
    credential_exist: bool = dockerhub.does_credential_exist(context=context) if stack_exist else False
    image_manifests = [cast(ImageManifest, getattr(context.images, i)) for i in context.images.names]
    credential_required: bool = any(
        im.get_source(account_id=context.account_id, region=context.region) == "dockerhub" for im in image_manifests
    )

    if stack_exist:
        if credential_required and not credential_exist and not credential_received:
            username, password = _request_dockerhub_credential(msg_ctx=msg_ctx)
            dockerhub.store_credential(context=context, username=username, password=password)
            credential_exist = True
        elif credential_received:
            dockerhub.store_credential(
                context=context,
                username=cast(str, username),
                password=cast(str, password),
            )
            credential_exist = True
    else:
        context.toolkit.deploy_id = "".join(random.choice(string.ascii_lowercase) for _ in range(6))
        if credential_required and not credential_received:
            username, password = _request_dockerhub_credential(msg_ctx=msg_ctx)
        credential_exist = False

    msg_ctx.progress(6)
    _logger.debug("context.toolkit.deploy_id: %s", context.toolkit.deploy_id)
    template_filename: str = toolkit.synth(context=context, top_level=top_level)
    cfn.deploy_template(
        stack_name=context.toolkit.stack_name, filename=template_filename, env_tag=context.env_tag, s3_bucket=None
    )
    ContextSerDe.fetch_toolkit_data(context=context)
    ContextSerDe.dump_context_to_ssm(context=context)

    if credential_exist is False:
        dockerhub.store_credential(
            context=context,
            username=cast(str, username),
            password=cast(str, password),
        )

def fetch_kubectl_data(context: "Context", k8s_context: str, include_teams: bool) -> None:
    _logger.debug("Fetching Kubectl data...")
    if include_teams:
        for team in context.teams:
            _logger.debug("Fetching team %s URL parameter", team.name)
            url = k8s.get_service_hostname(name="jupyterhub-public", k8s_context=k8s_context, namespace=team.name)
            team.jupyter_url = url

    landing_page_url: str = k8s.get_service_hostname(name="landing-page", k8s_context=k8s_context, namespace="env")
    k8_dashboard_url: str = k8s.get_service_hostname(
        name="kubernetes-dashboard", k8s_context=k8s_context, namespace="kubernetes-dashboard"
    )

    context.landing_page_url = f"https://{landing_page_url}"
    if context.cognito_external_provider:
        context.cognito_external_provider_redirect = context.landing_page_url
    context.k8_dashboard_url = f"https://{k8_dashboard_url}"

    _update_elbs(context=context)
    ContextSerDe.dump_context_to_ssm(context=context)
    _logger.debug("Kubectl data fetched successfully.")

def deploy_env(env_name: str, manifest_dir: str) -> None:
    # Assumed: context and changeset are loaded from SSM here, as in the remote
    # deploy_env variant below (the original snippet used them without defining them).
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)

    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")
    cdk_toolkit.deploy(context=context)
    _logger.debug("CDK Toolkit Stack deployed")
    env.deploy(
        context=context,
        eks_system_masters_roles_changes=changeset.eks_system_masters_roles_changeset if changeset else None,
    )
    _logger.debug("Env Stack deployed")
    eksctl.deploy_env(
        context=context,
        changeset=changeset,
    )
    _logger.debug("EKS Environment Stack deployed")
    kubectl.deploy_env(context=context)
    _logger.debug("Kubernetes Environment components deployed")
    helm.deploy_env(context=context)
    _logger.debug("Helm Charts installed")
    k8s_context = utils.get_k8s_context(context=context)
    kubectl.fetch_kubectl_data(context=context, k8s_context=k8s_context)
    ContextSerDe.dump_context_to_ssm(context=context)
    _logger.debug("Updating userpool redirect")
    _update_userpool_client(context=context)
    _update_userpool(context=context)

def fetch_cluster_data(context: "Context", cluster_name: str) -> None:
    _logger.debug("Fetching Cluster data...")
    cluster_data = cast(Dict[str, Any], eks.describe_cluster(cluster_name=cluster_name))

    context.eks_oidc_provider = cluster_data["cluster"]["identity"]["oidc"]["issuer"].replace("https://", "")
    context.cluster_sg_id = cluster_data["cluster"]["resourcesVpcConfig"]["clusterSecurityGroupId"]
    ContextSerDe.dump_context_to_ssm(context=context)
    _logger.debug("Cluster data fetched successfully.")
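
# fetch_cluster_data assumes the project's eks.describe_cluster wraps boto3's
# EKS DescribeCluster call. A minimal sketch of the two fields it reads, using
# boto3 directly (credentials/region assumed to be configured; illustrative only):
def _describe_cluster_sketch(cluster_name: str) -> Tuple[str, str]:
    import boto3

    response = boto3.client("eks").describe_cluster(name=cluster_name)
    # e.g. "oidc.eks.us-east-1.amazonaws.com/id/ABC123" after stripping the scheme
    oidc_provider = response["cluster"]["identity"]["oidc"]["issuer"].replace("https://", "")
    cluster_sg_id = response["cluster"]["resourcesVpcConfig"]["clusterSecurityGroupId"]
    return oidc_provider, cluster_sg_id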

def _deploy_images_batch(
    context: "Context", images: List[Tuple[str, Optional[str], Optional[str], List[str]]]
) -> None:
    _logger.debug("images:\n%s", images)
    new_images_manifest = {name: getattr(context.images, name) for name in context.images.names}
    max_workers = 5
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures: List[Future[Any]] = []
        name: str = ""
        dir: Optional[str] = None
        script: Optional[str] = None
        build_args: List[str] = []

        for name, dir, script, build_args in images:
            _logger.debug("name: %s | script: %s", name, script)
            path = os.path.join(os.getcwd(), "bundle", name)
            _logger.debug("path: %s", path)

            image_attr_name = name.replace("-", "_")
            image_def: ImageManifest = getattr(context.images, image_attr_name)
            tag = image_def.version
            if image_def.get_source(account_id=context.account_id, region=context.region) == "code":
                dirs: List[Tuple[str, str]] = [(path, cast(str, dir))]
            else:
                dirs = []

            bundle_path = bundle.generate_bundle(command_name=f"deploy_image-{name}", context=context, dirs=dirs)
            _logger.debug("bundle_path: %s", bundle_path)
            script_str = "NO_SCRIPT" if script is None else script
            build_args = [] if build_args is None else build_args
            buildspec = codebuild.generate_spec(
                context=context,
                plugins=False,
                cmds_build=[
                    "orbit remote --command _deploy_image "
                    f"{context.name} {name} {dir} {script_str} {' '.join(build_args)}"
                ],
            )
            new_images_manifest[image_attr_name] = ImageManifest(
                repository=f"{context.account_id}.dkr.ecr.{context.region}.amazonaws.com/orbit-{context.name}/{name}",
                version=tag,
                path=None,
            )
            futures.append(executor.submit(_deploy_image_remotely, context, name, bundle_path, buildspec))

        for f in futures:
            f.result()

    context.images = ImagesManifest(**new_images_manifest)
    ContextSerDe.dump_context_to_ssm(context=context)
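
# _deploy_images_batch fans work out with executor.submit and then calls
# f.result() on every future so that any exception raised inside a worker is
# re-raised in the caller instead of being silently swallowed. A minimal sketch
# of the pattern (hypothetical work function, illustrative only):
def _fan_out_sketch(items: List[str]) -> List[str]:
    def work(item: str) -> str:
        return item.upper()  # stand-in for a remote build

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(work, item) for item in items]
        # result() blocks until the future is done and re-raises any worker exception
        return [f.result() for f in futures]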

def _deploy_toolkit(
    context: "Context",
    top_level: str = "orbit",
) -> None:
    stack_exist: bool = cfn.does_stack_exist(stack_name=context.toolkit.stack_name)
    if not stack_exist:
        context.toolkit.deploy_id = "".join(random.choice(string.ascii_lowercase) for _ in range(6))
    _logger.debug("context.toolkit.deploy_id: %s", context.toolkit.deploy_id)
    template_filename: str = toolkit.synth(context=context, top_level=top_level)
    cfn.deploy_template(
        stack_name=context.toolkit.stack_name, filename=template_filename, env_tag=context.env_tag, s3_bucket=None
    )
    ContextSerDe.fetch_toolkit_data(context=context)
    ContextSerDe.dump_context_to_ssm(context=context)

def deploy_team(context: "Context", manifest: Manifest, team_manifest: TeamManifest) -> None:
    # Pull the team-specific custom cfn plugin, trigger pre_hook
    team_context: Optional["TeamContext"] = create_team_context_from_manifest(
        manifest=manifest, team_manifest=team_manifest
    )
    _logger.debug(f"team_context={team_context}")
    if team_context:
        _logger.debug(f"team_context.plugins={team_context.plugins}")
        _logger.debug("Calling team pre_hook")
        for plugin in team_context.plugins:
            hook: plugins.HOOK_TYPE = plugins.PLUGINS_REGISTRIES.get_hook(
                context=context,
                team_name=team_context.name,
                plugin_name=plugin.plugin_id,
                hook_name="pre_hook",
            )
            if hook is not None:
                _logger.debug(f"Found pre_hook for plugin_id {plugin.plugin_id}")
                hook(plugin.plugin_id, context, team_context, plugin.parameters)
        _logger.debug("End of pre_hook plugin execution")
    else:
        _logger.debug(f"Skipping pre_hook for unknown Team: {team_manifest.name}")

    args = [context.name, team_manifest.name]
    cdk.deploy(
        context=context,
        stack_name=f"orbit-{manifest.name}-{team_manifest.name}",
        app_filename=os.path.join(ORBIT_CLI_ROOT, "remote_files", "cdk", "team.py"),
        args=args,
    )
    team_context = context.get_team_by_name(name=team_manifest.name)
    if team_context:
        team_context.fetch_team_data()
    else:
        team_context = create_team_context_from_manifest(manifest=manifest, team_manifest=team_manifest)
        team_context.fetch_team_data()
        context.teams.append(team_context)

    _logger.debug(
        f"team_context.helm_repository: s3://{context.toolkit.s3_bucket}/helm/repositories/teams/{team_context.name}"
    )
    team_context.team_helm_repository = f"s3://{context.toolkit.s3_bucket}/helm/repositories/teams/{team_context.name}"
    team_context.user_helm_repository = f"s3://{context.toolkit.s3_bucket}/helm/repositories/user/{team_context.name}"
    ContextSerDe.dump_context_to_ssm(context=context)
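
# deploy_team invokes plugin hooks positionally with the call shape
# hook(plugin_id, context, team_context, parameters). A hypothetical pre_hook
# conforming to that shape (illustrative only; not a real Orbit plugin):
def _pre_hook_sketch(
    plugin_id: str, context: "Context", team_context: "TeamContext", parameters: Dict[str, Any]
) -> None:
    _logger.debug("pre_hook %s for team %s with parameters %s", plugin_id, team_context.name, parameters)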

def fetch_kubectl_data(context: "Context", k8s_context: str) -> None:
    _logger.debug("Fetching Kubectl data...")
    ingress_url: str = k8s.get_ingress_dns(name="istio-ingress", k8s_context=k8s_context, namespace="istio-system")

    if context.networking.frontend.custom_domain_name:
        context.landing_page_url = f"https://{context.networking.frontend.custom_domain_name}"
    else:
        context.landing_page_url = f"https://{ingress_url}"

    if context.cognito_external_provider:
        context.cognito_external_provider_redirect = context.landing_page_url

    _update_elbs(context=context)
    ContextSerDe.dump_context_to_ssm(context=context)
    _logger.debug("Kubectl data fetched successfully.")

def deploy_env(args: Tuple[str, ...]) -> None:
    _logger.debug("args: %s", args)
    if len(args) == 2:
        env_name: str = args[0]
        skip_images_remote_flag: str = str(args[1])
    else:
        raise ValueError("Unexpected number of values in args")

    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("Context loaded.")
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)
    _logger.debug("Changeset loaded.")

    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")
    cdk_toolkit.deploy(context=context)
    _logger.debug("CDK Toolkit Stack deployed")
    env.deploy(
        context=context,
        eks_system_masters_roles_changes=changeset.eks_system_masters_roles_changeset if changeset else None,
    )
    _logger.debug("Env Stack deployed")
    deploy_images_remotely(context=context, skip_images=skip_images_remote_flag == "skip-images")
    _logger.debug("Docker Images deployed")
    eksctl.deploy_env(
        context=context,
        changeset=changeset,
    )
    _logger.debug("EKS Environment Stack deployed")
    kubectl.deploy_env(context=context)
    _logger.debug("Kubernetes Environment components deployed")
    helm.deploy_env(context=context)
    _logger.debug("Helm Charts installed")
    k8s_context = utils.get_k8s_context(context=context)
    kubectl.fetch_kubectl_data(context=context, k8s_context=k8s_context, include_teams=False)
    ContextSerDe.dump_context_to_ssm(context=context)

def deploy_teams(args: Tuple[str, ...]) -> None:
    _logger.debug("args: %s", args)
    if len(args) == 1:
        env_name: str = args[0]
    else:
        raise ValueError("Unexpected number of values in args")

    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("Context loaded.")
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)
    _logger.debug("Changeset loaded.")

    if changeset:
        plugins.PLUGINS_REGISTRIES.load_plugins(
            context=context, plugin_changesets=changeset.plugin_changesets, teams_changeset=changeset.teams_changeset
        )
        _logger.debug("Plugins loaded")

    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")

    if changeset and changeset.teams_changeset and changeset.teams_changeset.removed_teams_names:
        kubectl.write_kubeconfig(context=context)
        for team_name in changeset.teams_changeset.removed_teams_names:
            team_context: Optional["TeamContext"] = context.get_team_by_name(name=team_name)
            if team_context is None:
                raise RuntimeError(f"TeamContext {team_name} not found!")
            _logger.debug("Destroying team %s", team_name)
            plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
            _logger.debug("Team Plugins destroyed")
            helm.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team Helm Charts uninstalled")
            kubectl.destroy_team(context=context, team_context=team_context)
            _logger.debug("Kubernetes Team components destroyed")
            eksctl.destroy_team(context=context, team_context=team_context)
            _logger.debug("EKS Team Stack destroyed")
            teams.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team %s destroyed", team_name)
            context.remove_team_by_name(name=team_name)
            ContextSerDe.dump_context_to_ssm(context=context)

    team_names = [t.name for t in context.teams]
    if changeset and changeset.teams_changeset and changeset.teams_changeset.added_teams_names:
        team_names.extend(changeset.teams_changeset.added_teams_names)

    manifest: Optional["Manifest"] = ManifestSerDe.load_manifest_from_ssm(env_name=context.name, type=Manifest)
    if manifest is None:
        raise RuntimeError(f"Manifest {context.name} not found!")

    kubectl.write_kubeconfig(context=context)
    for team_name in team_names:
        team_manifest = manifest.get_team_by_name(name=team_name)
        if team_manifest is None:
            raise RuntimeError(f"TeamManifest {team_name} not found!")
        teams.deploy_team(context=context, manifest=manifest, team_manifest=team_manifest)
        _logger.debug("Team Stacks deployed")
        team_context = context.get_team_by_name(name=team_name)
        if team_context is None:
            raise RuntimeError(f"TeamContext {team_name} not found!")
        eksctl.deploy_team(context=context, team_context=team_context)
        _logger.debug("EKS Team Stack deployed")
        kubectl.deploy_team(context=context, team_context=team_context)
        _logger.debug("Kubernetes Team components deployed")
        helm.deploy_team(context=context, team_context=team_context)
        _logger.debug("Team Helm Charts installed")
        plugins.PLUGINS_REGISTRIES.deploy_team_plugins(
            context=context, team_context=team_context, changes=changeset.plugin_changesets if changeset else []
        )
        team_context.plugins = team_manifest.plugins
        ContextSerDe.dump_context_to_ssm(context=context)
        _logger.debug("Team Plugins deployed")

    k8s_context = utils.get_k8s_context(context=context)
    kubectl.fetch_kubectl_data(context=context, k8s_context=k8s_context, include_teams=True)
    _logger.debug("Teams deployed")

def deploy_env(context: "Context", changeset: Optional[Changeset]) -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    _logger.debug("Synthesizing the EKSCTL Environment manifest")
    cluster_name = f"orbit-{context.name}"

    if cfn.does_stack_exist(stack_name=final_eks_stack_name) is False:
        requested_nodegroups = (
            changeset.managed_nodegroups_changeset.added_nodegroups
            if changeset and changeset.managed_nodegroups_changeset
            else []
        )
        _logger.debug(f"requested nodegroups: {[n.name for n in requested_nodegroups]}")
        output_filename = generate_manifest(context=context, name=stack_name, nodegroups=requested_nodegroups)
        _logger.debug("Deploying EKSCTL Environment resources")
        sh.run(
            f"eksctl create cluster -f {output_filename} --install-nvidia-plugin=false "
            "--write-kubeconfig --verbose 4"
        )
        username = f"orbit-{context.name}-admin"
        arn = f"arn:aws:iam::{context.account_id}:role/{username}"
        _logger.debug(f"Adding IAM Identity Mapping - Role: {arn}, Username: {username}, Group: system:masters")
        sh.run(
            f"eksctl create iamidentitymapping --cluster {cluster_name} --arn {arn} "
            f"--username {username} --group system:masters"
        )

        context.managed_nodegroups = requested_nodegroups
        for ng in requested_nodegroups:
            if ng.nodes_num_desired < 1 or ng.nodes_num_min < 1:
                _logger.debug(f"Reducing AutoScaling capacity for newly created NodeGroup: {ng.name}")
                autoscaling.update_nodegroup_autoscaling_group(
                    cluster_name=f"orbit-{context.name}", nodegroup_manifest=ng
                )
        ContextSerDe.dump_context_to_ssm(context=context)
    else:
        current_nodegroups = context.managed_nodegroups
        _logger.debug(f"current nodegroups: {[n.name for n in current_nodegroups]}")
        sh.run(f"eksctl utils write-kubeconfig --cluster orbit-{context.name} --set-kubeconfig-context")

        if changeset and changeset.managed_nodegroups_changeset:
            if changeset.managed_nodegroups_changeset.added_nodegroups:
                output_filename = generate_manifest(
                    context=context,
                    name=stack_name,
                    nodegroups=changeset.managed_nodegroups_changeset.added_nodegroups,
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.added_nodegroups
                    if not eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Creating ManagedNodeGroups: %s", nodegroups)
                sh.run(f"eksctl create nodegroup -f {output_filename} --include={','.join(nodegroups)} --verbose 4")

                current_nodegroups.extend(
                    [ng for ng in changeset.managed_nodegroups_changeset.added_nodegroups if ng.name in nodegroups]
                )
                context.managed_nodegroups = current_nodegroups
                ContextSerDe.dump_context_to_ssm(context=context)

            if changeset.managed_nodegroups_changeset.removed_nodegroups:
                output_filename = generate_manifest(
                    context=context,
                    name=stack_name,
                    nodegroups=changeset.managed_nodegroups_changeset.removed_nodegroups,
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.removed_nodegroups
                    if eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Deleting ManagedNodeGroups: %s", nodegroups)
                sh.run(
                    f"eksctl delete nodegroup -f {output_filename} --include={','.join(nodegroups)} "
                    "--approve --wait --drain=false --verbose 4"
                )
                context.managed_nodegroups = [ng for ng in current_nodegroups if ng.name not in nodegroups]
                ContextSerDe.dump_context_to_ssm(context=context)

            if changeset.managed_nodegroups_changeset.modified_nodegroups:
                for ng in changeset.managed_nodegroups_changeset.modified_nodegroups:
                    autoscaling.update_nodegroup_autoscaling_group(
                        cluster_name=f"orbit-{context.name}", nodegroup_manifest=ng
                    )

    eks_system_masters_changeset = (
        changeset.eks_system_masters_roles_changeset
        if changeset and changeset.eks_system_masters_roles_changeset
        else None
    )
    map_iam_identities(
        context=context,
        cluster_name=cluster_name,
        eks_system_masters_roles_changes=eks_system_masters_changeset,
    )

    associate_open_id_connect_provider(context=context, cluster_name=cluster_name)
    fetch_cluster_data(context=context, cluster_name=cluster_name)
    authorize_cluster_pod_security_group(context=context)

    iam.add_assume_role_statement(
        role_name=f"orbit-{context.name}-cluster-autoscaler-role",
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": "system:serviceaccount:kube-system:cluster-autoscaler"
                }
            },
        },
    )
    iam.add_assume_role_statement(
        role_name=f"orbit-{context.name}-eks-cluster-role",
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": "system:serviceaccount:kube-system:fsx-csi-controller-sa"
                }
            },
        },
    )
    iam.add_assume_role_statement(
        role_name=f"orbit-{context.name}-admin",
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": f"system:serviceaccount:kube-system:orbit-{context.name}-admin"
                }
            },
        },
    )
    _logger.debug("EKSCTL deployed")
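
# The three add_assume_role_statement calls above implement IRSA (IAM Roles for
# Service Accounts): each role trusts the cluster's OIDC provider, scoped by a
# StringLike condition on the web-identity token's `sub` claim so that only the
# named Kubernetes service account can call sts:AssumeRoleWithWebIdentity.
# A minimal helper capturing the shared statement shape (hypothetical,
# illustrative only):
def _irsa_trust_statement_sketch(
    account_id: str, oidc_provider: str, namespace: str, service_account: str
) -> Dict[str, Any]:
    return {
        "Effect": "Allow",
        "Principal": {"Federated": f"arn:aws:iam::{account_id}:oidc-provider/{oidc_provider}"},
        "Action": "sts:AssumeRoleWithWebIdentity",
        "Condition": {
            "StringLike": {f"{oidc_provider}:sub": f"system:serviceaccount:{namespace}:{service_account}"}
        },
    }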

def destroy_all(context: "Context") -> None:
    for team_context in context.teams:
        destroy_team(context=context, team_context=team_context)
    context.teams = []
    ContextSerDe.dump_context_to_ssm(context=context)

def deploy_env(context: "Context", changeset: Optional[Changeset]) -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    _logger.debug("Synthesizing the EKSCTL Environment manifest")
    cluster_name = f"orbit-{context.name}"

    if cfn.does_stack_exist(stack_name=final_eks_stack_name) is False:
        requested_nodegroups = (
            changeset.managed_nodegroups_changeset.added_nodegroups
            if changeset and changeset.managed_nodegroups_changeset
            else []
        )
        _logger.debug(f"requested nodegroups: {[n.name for n in requested_nodegroups]}")
        output_filename = generate_manifest(context=context, name=stack_name, nodegroups=requested_nodegroups)
        _logger.debug("Deploying EKSCTL Environment resources")
        sh.run(f"eksctl create cluster -f {output_filename} --write-kubeconfig --verbose 4")
        username = f"orbit-{context.name}-admin"
        arn = f"arn:aws:iam::{context.account_id}:role/{username}"
        _logger.debug(f"Adding IAM Identity Mapping - Role: {arn}, Username: {username}, Group: system:masters")
        sh.run(
            f"eksctl create iamidentitymapping --cluster {cluster_name} --arn {arn} "
            f"--username {username} --group system:masters"
        )
        context.managed_nodegroups = requested_nodegroups
        ContextSerDe.dump_context_to_ssm(context=context)
    else:
        current_nodegroups = context.managed_nodegroups
        _logger.debug(f"current nodegroups: {[n.name for n in current_nodegroups]}")
        sh.run(f"eksctl utils write-kubeconfig --cluster orbit-{context.name} --set-kubeconfig-context")

        if changeset and changeset.managed_nodegroups_changeset:
            if changeset.managed_nodegroups_changeset.added_nodegroups:
                output_filename = generate_manifest(
                    context=context,
                    name=stack_name,
                    nodegroups=changeset.managed_nodegroups_changeset.added_nodegroups,
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.added_nodegroups
                    if not eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Creating ManagedNodeGroups: %s", nodegroups)
                sh.run(f"eksctl create nodegroup -f {output_filename} --include={','.join(nodegroups)} --verbose 4")
                current_nodegroups.extend(
                    [ng for ng in changeset.managed_nodegroups_changeset.added_nodegroups if ng.name in nodegroups]
                )
                context.managed_nodegroups = current_nodegroups
                ContextSerDe.dump_context_to_ssm(context=context)

            if changeset.managed_nodegroups_changeset.removed_nodegroups:
                output_filename = generate_manifest(
                    context=context,
                    name=stack_name,
                    nodegroups=changeset.managed_nodegroups_changeset.removed_nodegroups,
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.removed_nodegroups
                    if eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Deleting ManagedNodeGroups: %s", nodegroups)
                sh.run(
                    f"eksctl delete nodegroup -f {output_filename} --include={','.join(nodegroups)} "
                    "--approve --wait --drain=false --verbose 4"
                )
                context.managed_nodegroups = [ng for ng in current_nodegroups if ng.name not in nodegroups]
                ContextSerDe.dump_context_to_ssm(context=context)

    eks_system_masters_changeset = (
        changeset.eks_system_masters_roles_changeset
        if changeset and changeset.eks_system_masters_roles_changeset
        else None
    )
    map_iam_identities(
        context=context,
        cluster_name=cluster_name,
        eks_system_masters_roles_changes=eks_system_masters_changeset,
    )
    fetch_cluster_data(context=context, cluster_name=cluster_name)
    associate_open_id_connect_provider(context=context, cluster_name=cluster_name)
    authorize_cluster_pod_security_group(context=context)
    _logger.debug("EKSCTL deployed")

def deploy_teams(env_name: str, manifest_dir: str) -> None:
    # Assumed: context and changeset are loaded from SSM here, as in the remote
    # deploy_teams variant above (the original snippet used them without defining them).
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    changeset: Optional["Changeset"] = load_changeset_from_ssm(env_name=env_name)

    if changeset:
        plugins.PLUGINS_REGISTRIES.load_plugins(
            context=context,
            plugin_changesets=changeset.plugin_changesets,
            teams_changeset=changeset.teams_changeset,
        )
        _logger.debug("Plugins loaded")

    docker.login(context=context)
    _logger.debug("DockerHub and ECR Logged in")

    if changeset and changeset.teams_changeset and changeset.teams_changeset.removed_teams_names:
        kubectl.write_kubeconfig(context=context)
        for team_name in changeset.teams_changeset.removed_teams_names:
            team_context: Optional["TeamContext"] = context.get_team_by_name(name=team_name)
            if team_context is None:
                raise RuntimeError(f"TeamContext {team_name} not found!")
            _logger.debug("Destroying all user namespaces for %s", team_context.name)
            sh.run(f"kubectl delete namespaces -l orbit/team={team_context.name},orbit/space=user --wait=true")
            _logger.debug("Destroying team %s", team_name)
            plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
            _logger.debug("Team Plugins destroyed")
            helm.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team Helm Charts uninstalled")
            kubectl.destroy_team(context=context, team_context=team_context)
            _logger.debug("Kubernetes Team components destroyed")
            eksctl.destroy_team(context=context, team_context=team_context)
            _logger.debug("EKS Team Stack destroyed")
            teams.destroy_team(context=context, team_context=team_context)
            _logger.debug("Team %s destroyed", team_name)
            context.remove_team_by_name(name=team_name)
            ContextSerDe.dump_context_to_ssm(context=context)

    team_names = [t.name for t in context.teams]
    if changeset and changeset.teams_changeset and changeset.teams_changeset.added_teams_names:
        team_names.extend(changeset.teams_changeset.added_teams_names)

    manifest: Optional["Manifest"] = ManifestSerDe.load_manifest_from_ssm(env_name=context.name, type=Manifest)
    if manifest is None:
        raise RuntimeError(f"Manifest {context.name} not found!")

    kubectl.write_kubeconfig(context=context)
    for team_name in team_names:
        team_manifest = manifest.get_team_by_name(name=team_name)
        if team_manifest is None:
            raise RuntimeError(f"TeamManifest {team_name} not found!")
        teams.deploy_team(context=context, manifest=manifest, team_manifest=team_manifest)
        _logger.debug("Team Stacks deployed")
        team_context = context.get_team_by_name(name=team_name)
        if team_context is None:
            raise RuntimeError(f"TeamContext {team_name} not found!")
        eksctl.deploy_team(context=context, team_context=team_context)
        _logger.debug("EKS Team Stack deployed")
        kubectl.deploy_team(context=context, team_context=team_context)
        _logger.debug("Kubernetes Team components deployed")
        helm.deploy_team(context=context, team_context=team_context)
        _logger.debug("Team Helm Charts installed")
        plugins.PLUGINS_REGISTRIES.deploy_team_plugins(
            context=context, team_context=team_context, changes=changeset.plugin_changesets if changeset else []
        )
        team_context.plugins = team_manifest.plugins
        ContextSerDe.dump_context_to_ssm(context=context)
        _logger.debug("Team Plugins deployed")

    _logger.debug("Teams deployed")

def deploy_images_remotely(env: str, requested_image: Optional[str] = None) -> None:
    _logger.debug(f"deploy_images_remotely args: {env} {requested_image}")
    image_dir = os.path.realpath(os.path.join(ORBIT_CLI_ROOT, "../../images"))
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
    _logger.debug(f"context loaded: {env}")
    codebuild_role = str(context.toolkit.admin_role)
    _logger.debug(f"The CODEBUILD_ROLE is {codebuild_role}")

    if requested_image:
        if os.path.isdir(f"{image_dir}/{requested_image}"):
            _logger.debug(f"Request build of single image {requested_image}")
            (image_name, image_addr, version) = _deploy_images_batch(
                path=f"{image_dir}/{requested_image}",
                image_name=requested_image,
                env=env,
                build_execution_role=codebuild_role,
            )
            _logger.debug(f"Returned from _deploy_images_batch: {image_name} {image_addr} {version}")
            im = str(image_name).replace("-", "_")
            new_image_manifest = ImageManifest(repository=str(image_addr), version=str(version))
            _logger.debug(f"New image manifest from single built image: {new_image_manifest}")
            context_latest: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
            context_latest.images.__dict__[im] = new_image_manifest
            ContextSerDe.dump_context_to_ssm(context=context_latest)
        else:
            _logger.error("An image was requested to be built, but it doesn't exist in the images/ dir")
    else:
        new_images_manifest = {}

        # First, build k8s-utilities...
        (image_name, image_addr, version) = _deploy_images_batch(
            path=f"{image_dir}/k8s-utilities",
            image_name="k8s-utilities",
            env=env,
            build_execution_role=codebuild_role,
        )
        _logger.debug(f"Returned from _deploy_images_batch: {image_name} {image_addr} {version}")
        im = str(image_name).replace("-", "_")
        new_images_manifest[im] = ImageManifest(repository=str(image_addr), version=str(version))

        # ...then build the images that depend on k8s-utilities in parallel.
        list_subfolders_with_paths = [f.path for f in os.scandir(image_dir) if f.is_dir()]
        max_workers = 4
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:

            def deploy_images_batch_helper(args_tuple: Tuple[str, str, str, str]) -> Tuple[str, str, str]:
                # Unpacks (path, image_name, env, build_execution_role) for executor.map
                return _deploy_images_batch(
                    path=args_tuple[0], image_name=args_tuple[1], env=args_tuple[2], build_execution_role=args_tuple[3]
                )

            args_tuples = [
                (path, path.split("/")[-1], env, codebuild_role)
                for path in list_subfolders_with_paths
                if "k8s-utilities" not in path
            ]
            results = list(executor.map(deploy_images_batch_helper, args_tuples))

        for res in results:
            im = str(res[0]).replace("-", "_")
            new_images_manifest[im] = ImageManifest(repository=str(res[1]), version=str(res[2]))

        _logger.debug(f"New image manifest from all images: {new_images_manifest}")
        context_latest_all: "Context" = ContextSerDe.load_context_from_ssm(env_name=env, type=Context)
        context_latest_all.images = ImagesManifest(**new_images_manifest)  # type: ignore
        ContextSerDe.dump_context_to_ssm(context=context_latest_all)
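
# deploy_images_remotely uses executor.map rather than the submit/result pattern
# seen in _deploy_images_batch: map yields results in the same order as its
# inputs and re-raises the first worker exception when the results iterator is
# consumed. A minimal sketch of that design choice (illustrative only):
def _ordered_map_sketch(paths: List[str]) -> List[str]:
    def build(path: str) -> str:
        return path.split("/")[-1]  # stand-in for a remote image build

    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        # results come back in the same order as `paths`
        return list(executor.map(build, paths))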