def _remove_team_resources(namespace: str, team_spec: str, logger: kopf.Logger, **_: Any):  # type: ignore
    v1 = CoreV1Api()
    logger.info(f"_remove_team_resources looking with orbit/label={team_spec}")
    # Get all the namespaces with the team label
    label_selector = f"orbit/team={team_spec}"
    all_namespaces = v1.list_namespace(label_selector=label_selector).to_dict()
    all_ns = [
        item.get("metadata").get("name") for item in all_namespaces["items"] if item.get("metadata", {}).get("name")
    ]
    # List all the resources we want to force-delete:
    # group, version, plural, status_element
    custom_object_list = [
        ["sagemaker.aws.amazon.com", "v1", "hyperparametertuningjobs", "trainingJobStatus"],
        ["sagemaker.aws.amazon.com", "v1", "trainingjobs", "trainingJobStatus"],
        ["sagemaker.aws.amazon.com", "v1", "batchtransformjobs", "transformJobStatus"],
        ["sagemaker.aws.amazon.com", "v1", "hostingdeployments", "status"],
        ["kubeflow.org", "v1", "notebooks", "NA"],
        ["kubeflow.org", "v1", "profile", "NA"],
        ["batch", "v1", "jobs", "NA"],
        ["apps", "v1", "deployments", "NA"],
        ["apps", "v1", "statefulsets", "NA"],
    ]
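    # Each [group, version, plural] entry maps to a namespaced collection, e.g.
    # /apis/sagemaker.aws.amazon.com/v1/namespaces/{namespace}/trainingjobs.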

    for namespace in all_ns:
        logger.info(f"Looking at NS {namespace}")

        for co in custom_object_list:
            _delete_custom_objects(group=co[0], version=co[1], plural=co[2], namespace=namespace, logger=logger)
        _delete_pods(namespace=namespace, logger=logger)

        for co in custom_object_list[0:4]:
            _patch_and_delete_stubborn_custom_resources(
                group=co[0], version=co[1], plural=co[2], status_element=co[3], namespace=namespace, logger=logger
            )
def scheduler(status: kopf.Status, patch: kopf.Patch, logger: kopf.Logger,
              **_: Any) -> str:
    replication = status.get("replication", {})
    replication["codeBuildStatus"] = None
    replication["codeBuildPhase"] = None
    replication["codeBuildId"] = None

    attempt = replication.get("attempt", 0) + 1
    if attempt > CONFIG["max_replication_attempts"]:
        replication["replicationStatus"] = "MaxAttemptsExceeded"
        replication["attempt"] = attempt

        patch["status"] = {"replication": replication}
    else:
        with LOCK:
            global WORKERS_IN_PROCESS
            logger.debug("WORKERS_IN_PROCESS: %s", WORKERS_IN_PROCESS)
            if WORKERS_IN_PROCESS < CONFIG["workers"]:
                WORKERS_IN_PROCESS += 1
                replication["replicationStatus"] = "Scheduled"
                replication["attempt"] = attempt

                patch["status"] = {"replication": replication}
                logger.info("Schedule Attempt: %s", replication["attempt"])

    return cast(str, replication["replicationStatus"])
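# The scheduler above and the codebuild_runner/codebuild_monitor handlers below coordinate
# through module-level worker-slot state guarded by a lock. A minimal sketch of the globals
# they assume (the names come from the handlers; the values here are placeholders, not the
# real operator configuration):
import threading

LOCK = threading.Lock()   # serializes updates to the in-process worker counter
WORKERS_IN_PROCESS = 0    # number of replications currently being processed
CONFIG = {"workers": 2, "max_replication_attempts": 3}  # illustrative values only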
def codebuild_runner(
    spec: kopf.Spec,
    patch: kopf.Patch,
    status: kopf.Status,
    logger: kopf.Logger,
    **_: Any,
) -> str:
    replication = status.get("replication", {})

    build_id, error = imagereplication_utils.replicate_image(
        src=spec["source"], dest=spec["destination"], config=CONFIG)

    replication["replicationStatus"] = "Replicating"
    replication["codeBuildId"] = build_id

    if error:
        replication["replicationStatus"] = "Failed"
        replication["failureDelay"] = 30
        with LOCK:
            global WORKERS_IN_PROCESS
            WORKERS_IN_PROCESS -= 1

    patch["status"] = {"replication": replication}
    if error:
        logger.error("CodeBuildId: %s Error: %s", build_id, error)
    else:
        logger.info("CodeBuildId: %s Error: %s", build_id, error)

    return cast(str, replication["replicationStatus"])
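# imagereplication_utils.replicate_image is not shown in this listing. A rough sketch of a
# compatible helper follows: it starts a CodeBuild build and returns (build_id, error). The
# CodeBuild project key and environment variable names are assumptions, not taken from the source.
from typing import Any, Dict, Optional, Tuple

import boto3


def replicate_image(src: str, dest: str, config: Dict[str, Any]) -> Tuple[Optional[str], Optional[str]]:
    codebuild = boto3.client("codebuild")
    try:
        resp = codebuild.start_build(
            projectName=config["codebuild_project"],  # hypothetical config key
            environmentVariablesOverride=[
                {"name": "SRC_IMAGE", "value": src, "type": "PLAINTEXT"},
                {"name": "DEST_IMAGE", "value": dest, "type": "PLAINTEXT"},
            ],
        )
        return resp["build"]["id"], None
    except Exception as e:
        return None, str(e)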
def codebuild_monitor(status: kopf.Status, patch: kopf.Patch,
                      logger: kopf.Logger, **_: Any) -> str:
    replication = status.get("replication", {})

    build_id = replication.get("codeBuildId", None)

    client = boto3.client("codebuild")
    build = client.batch_get_builds(ids=[build_id])["builds"][0]
    replication["codeBuildStatus"] = build["buildStatus"]
    replication["codeBuildPhase"] = build["currentPhase"]

    if replication["codeBuildStatus"] not in "IN_PROGRESS":
        logger.info("CodeBuildId: %s BuildStatus: %s", build_id,
                    replication["codeBuildStatus"])
        with LOCK:
            global WORKERS_IN_PROCESS
            WORKERS_IN_PROCESS -= 1
        codebuild_attempts = replication.get("codeBuildAttempts", [])
        codebuild_attempts.append({
            "codeBuildId": build_id,
            "codeBuildStatus": build["buildStatus"],
            "codeBuildPhase": build["currentPhase"],
        })
        replication["codeBuildAttempts"] = codebuild_attempts
        replication["replicationStatus"] = "Complete" if build[
            "buildStatus"] == "SUCCEEDED" else "Failed"

    if replication["replicationStatus"] == "Failed":
        replication["failureDelay"] = 30

    patch["status"] = {"replication": replication}
    return cast(str, replication["codeBuildStatus"])
def configure(settings: kopf.OperatorSettings, logger: kopf.Logger, **_: Any) -> None:
    settings.persistence.progress_storage = kopf.MultiProgressStorage(
        [
            kopf.AnnotationsProgressStorage(prefix="orbit.aws"),
            kopf.StatusProgressStorage(field="status.orbit-aws"),
        ]
    )
    settings.persistence.finalizer = "teamspace-operator.orbit.aws/kopf-finalizer"
    settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO"))
    logger.info("START the Teamspace Controller")
def _uninstall_chart(helm_release: str, namespace: str,
                     logger: kopf.Logger) -> bool:
    install_status = True
    cmd = f"/usr/local/bin/helm uninstall --debug --namespace {namespace} {helm_release}"
    try:
        logger.debug("running uninstall cmd: %s", cmd)
        output = run_command(cmd)
        logger.debug(output)
        logger.info("finished uninstall cmd: %s", cmd)
    except Exception:
        logger.error("errored cmd: %s", cmd)
        install_status = False
    return install_status
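# run_command is used throughout but not defined in this listing. A minimal sketch of a
# compatible helper, assuming shell execution (some commands in this listing rely on shell
# redirection) with output captured and a non-zero exit raising:
import subprocess


def run_command(cmd: str) -> str:
    """Run a shell command and return its stdout; raises CalledProcessError on failure (assumed helper)."""
    result = subprocess.run(cmd, shell=True, check=True, capture_output=True, text=True)
    return result.stdout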
def _delete_pods(namespace: str, logger: kopf.Logger, use_async=True, **_: Any):  # type: ignore
    logger.info(f"Deleting ALL PODS in ns {namespace}")
    api = CoreV1Api()
    try:
        api.delete_collection_namespaced_pod(
            namespace=namespace,
            async_req=use_async,
            grace_period_seconds=0,
            propagation_policy="Background",
            body=V1DeleteOptions(),
        )
    except ApiException as e:
        logger.warn("calling CustomObjectsApi->delete_collection_namespaced_pod: %s\n" % e)
def uninstall_team(namespace: str, name: str, spec: kopf.Spec, patch: kopf.Patch, logger: kopf.Logger, **_: Any) -> str:
    logger.info("In UNINSTALL_TEAM  Teamspace Controller")

    # spec:
    # env: ${env_name}
    # space: team
    # team: ${team}
    team_spec = spec.get("team", None)
    logger.info(f"Preparing to Destroy all resources in team namespace {namespace}")
    if team_spec:
        _remove_team_resources(namespace=namespace, team_spec=team_spec, logger=logger)
        _remove_user_namespaces(namespace=namespace, team_spec=team_spec, logger=logger)
        patch["status"] = {"teamspaceOperator": {"status": "DeleteProcessed"}}
    else:
        logging.warn("Team spec not found...moving on")
    return "Uninstalled"
def _remove_user_namespaces(namespace: str, team_spec: str, logger: kopf.Logger, **_: Any):  # type: ignore
    logger.info(
        f"Removing all user namespaces with labels orbit/team={team_spec},orbit/space=user in namespace {namespace} "
    )

    v1 = CoreV1Api()
    label_selector = f"orbit/team={team_spec},orbit/space=user"
    all_namespaces = v1.list_namespace(label_selector=label_selector).to_dict()

    all_ns = [
        item.get("metadata").get("name") for item in all_namespaces["items"] if item.get("metadata", {}).get("name")
    ]
    for ns in all_ns:
        logger.info(f"Calling delete namespace {ns}")
        try:
            v1.delete_namespace(name=ns, async_req=True)
        except ApiException as e:
            logger.warn("calling CoreV1API->delete_namespace had an error: %s\n" % e)
def configure(settings: kopf.OperatorSettings, logger: kopf.Logger,
              **_: Any) -> None:
    settings.admission.server = kopf.WebhookServer(
        cafile="/certs/ca.crt",
        certfile="/certs/tls.crt",
        pkeyfile="/certs/tls.key",
        port=443,
    )
    settings.persistence.progress_storage = kopf.MultiProgressStorage([
        kopf.AnnotationsProgressStorage(prefix="orbit.aws"),
        kopf.StatusProgressStorage(field="status.orbit-aws"),
    ])
    settings.persistence.finalizer = "imagereplication-pod-webhook.orbit.aws/kopf-finalizer"
    settings.posting.level = logging.getLevelName(
        os.environ.get("EVENT_LOG_LEVEL", "INFO"))

    global CONFIG
    CONFIG = imagereplication_utils.get_config()
    logger.info("CONFIG: %s", CONFIG)
def _patch_and_delete_stubborn_custom_resources(  # type: ignore
    group: str,
    version: str,
    plural: str,
    namespace: str,
    status_element: str,
    logger: kopf.Logger,
    use_async=True,
    **_: Any,
):
    logger.info(f"_patch_and_delete_stubborn_custom_resources for {plural}.{group} in namespace {namespace}")
    co = CustomObjectsApi()
    resp = co.list_namespaced_custom_object(group=group, version=version, plural=plural, namespace=namespace)
    failed_res = [
        item.get("metadata").get("name")
        for item in resp["items"]
        if item.get("status", {}).get(status_element) in ["Failed", "Completed", "InProgress"]
    ]
    for item in failed_res:
        try:
            logger.info(f"Patching item {item} in {plural}.{group}")
            # Clearing the finalizers lets Kubernetes finish deleting the stuck resource.
            patch = {"metadata": {"finalizers": []}}
            co.patch_namespaced_custom_object(
                group=group, version=version, plural=plural, namespace=namespace, name=item, body=patch
            )
            logger.info(f"Deleting item {item} in {plural}.{group}")
            co.delete_namespaced_custom_object(
                group=group,
                version=version,
                plural=plural,
                namespace=namespace,
                name=item,
            )
        except ApiException as e:
            logger.warn("Trying to patch and delete failed: %s\n" % e)
def replication_checker(
    spec: kopf.Spec,
    status: kopf.Status,
    patch: kopf.Patch,
    logger: kopf.Logger,
    **_: Any,
) -> str:
    if status.get("replication", None) is not None:
        return cast(str, status["replication"].get("replicationStatus",
                                                   "Unknown"))

    replication = {}
    if imagereplication_utils.image_replicated(image=spec["destination"],
                                               logger=logger):
        logger.info("Skipped: Image previously replicated to ECR")
        replication["replicationStatus"] = "ECRImageExists"
    else:
        logger.info("Starting Replication")
        replication["replicationStatus"] = "Pending"

    patch["status"] = {"replication": replication}
    return replication["replicationStatus"]
def _install_helm_chart(
    helm_release: str,
    namespace: str,
    team: str,
    user: str,
    user_email: str,
    user_efsapid: str,
    repo: str,
    package: str,
    logger: kopf.Logger,
) -> bool:
    install_status = True
    # try to uninstall first
    try:
        cmd = f"helm uninstall --debug {helm_release} -n {team}"
        logger.debug("running cmd: %s", cmd)
        output = run_command(cmd)
        logger.debug(output)
        logger.info("finished cmd: %s", cmd)
    except Exception:
        logger.debug("helm uninstall did not find the release")

    cmd = (
        f"/usr/local/bin/helm upgrade --install --devel --debug --namespace {team} "
        f"{helm_release} {repo}/{package} "
        f"--set user={user},user_email={user_email},namespace={namespace},user_efsapid={user_efsapid}"
    )
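    # Rendered example (all values below are illustrative only):
    #   /usr/local/bin/helm upgrade --install --devel --debug --namespace my-team \
    #       my-namespace-jupyter my-team--userspace/jupyter \
    #       --set user=alice,user_email=alice@example.com,namespace=my-namespace,user_efsapid=fsap-0123456789abcdef0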
    try:
        logger.debug("running cmd: %s", cmd)
        output = run_command(cmd)
        logger.debug(output)
        logger.info("finished cmd: %s", cmd)
    except Exception:
        logger.warning("errored cmd: %s", cmd)
        install_status = False
    return install_status
def _delete_custom_objects(  # type: ignore
    group: str, version: str, plural: str, namespace: str, logger: kopf.Logger, use_async=True, **_: Any
):

    logger.info(f"Deleting {plural}.{group} in ns {namespace}")
    co = CustomObjectsApi()

    try:
        resp = co.delete_collection_namespaced_custom_object(
            group=group,
            version=version,
            namespace=namespace,
            plural=plural,
            grace_period_seconds=0,
            propagation_policy="Background",
            pretty="true",
            async_req=use_async,
            body=V1DeleteOptions(),
        )

        return resp
    except ApiException as e:
        logger.warn("calling CustomObjectsApi->delete_collection_namespaced_custom_object: %s\n" % e)
        logger.warn("Assume it did not exist")
def _delete_user_efs_endpoint(user_name: str, user_namespace: str,
                              logger: kopf.Logger, meta: kopf.Meta) -> None:
    efs = boto3.client("efs")

    logger.info(
        f"Fetching the EFS access point in the namespace {user_namespace} for user {user_name}"
    )

    efs_access_point_id = meta.get("labels", {}).get("userEfsApId", None)
    if not efs_access_point_id:
        logger.warning(f"No userEfsApId label found for user {user_name}; skipping access point deletion")
        return

    logger.info(
        f"Deleting the EFS access point {efs_access_point_id} for user {user_name}"
    )

    try:
        efs.delete_access_point(AccessPointId=efs_access_point_id)
        logger.info(f"Access point {efs_access_point_id} deleted")
    except efs.exceptions.AccessPointNotFound:
        logger.warning(f"Access point not found: {efs_access_point_id}")
    except efs.exceptions.InternalServerError as e:
        logger.warning(e)
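# _create_user_efs_endpoint (called by install_team below) is not included in this listing.
# A rough sketch of a compatible helper; the POSIX IDs, directory layout, and tags are
# assumptions rather than the operator's actual values:
from typing import Any, Dict

import boto3


def _create_user_efs_endpoint(user: str, team_name: str, team_efsid: str, env: str) -> Dict[str, Any]:
    efs = boto3.client("efs")
    # create_access_point returns a dict containing "AccessPointId", which install_team reads.
    return efs.create_access_point(
        FileSystemId=team_efsid,
        PosixUser={"Uid": 1000, "Gid": 100},  # illustrative IDs
        RootDirectory={
            "Path": f"/{team_name}/private/{user}",  # hypothetical directory layout
            "CreationInfo": {"OwnerUid": 1000, "OwnerGid": 100, "Permissions": "770"},
        },
        Tags=[{"Key": "Env", "Value": env}, {"Key": "TeamSpace", "Value": team_name}],
    )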
def install_team(
    name: str,
    meta: kopf.Meta,
    spec: kopf.Spec,
    status: kopf.Status,
    patch: kopf.Patch,
    podsettings_idx: kopf.Index[str, Dict[str, Any]],
    logger: kopf.Logger,
    **_: Any,
) -> str:
    logger.debug("loading kubeconfig")
    load_config()

    logger.info("processing userspace cr")
    logger.debug("namespace: %s", name)

    env = spec.get("env", None)
    space = spec.get("space", None)
    team = spec.get("team", None)
    user = spec.get("user", None)
    team_efsid = spec.get("teamEfsId", None)
    user_email = spec.get("userEmail", None)

    logger.debug("new namespace: %s,%s,%s,%s", team, user, user_email, name)

    if not env or not space or not team or not user or not team_efsid or not user_email:
        logger.error(
            ("All of env, space, team, user, team_efsid, and user_email are required."
             "Found: %s, %s, %s, %s, %s, %s"),
            env,
            space,
            team,
            user,
            team_efsid,
            user_email,
        )
        patch["metadata"] = {
            "annotations": {
                "orbit/helm-chart-installation": "Skipped"
            }
        }
        return "Skipping"

    client = dynamic_client()

    try:
        logger.info(f"Creating EFS endpoint for {team}-{user}...")
        efs_ep_resp = _create_user_efs_endpoint(user=user,
                                                team_name=team,
                                                team_efsid=team_efsid,
                                                env=env)
        access_point_id = efs_ep_resp.get("AccessPointId", "")
        logger.info(f"AccessPointId is {access_point_id}")
    except Exception as e:
        logger.error(
            f"Error while creating EFS access point for user_name={user} and team={team}: {e}"
        )
        patch["status"] = {
            "userSpaceOperator": {
                "installationStatus": "Failed to create EFS AccessPoint",
                "exception": str(e)
            }
        }
        return "Failed"

    team_context = _get_team_context(team=team, logger=logger)
    logger.info("team context keys: %s", team_context.keys())
    helm_repo_url = team_context["UserHelmRepository"]
    logger.debug("Adding Helm Repository: %s at %s", team, helm_repo_url)
    repo = f"{team}--userspace"
    # add the team repo
    unique_hash = "".join(
        random.choice(string.ascii_lowercase) for i in range(6))
    run_command(f"helm repo add {repo} {helm_repo_url}")
    try:
        # In isolated envs, we cannot refresh stable, and since we don't use it, we remove it
        run_command("helm repo remove stable")
    except Exception:
        logger.info(
            "Tried to remove stable repo...got an error, but moving on")
    run_command("helm repo update")
    run_command(
        f"helm search repo --devel {repo} -o json > /tmp/{unique_hash}-charts.json"
    )
    with open(f"/tmp/{unique_hash}-charts.json", "r") as f:
        charts = json.load(f)
    run_command(
        f"helm list -n {team} -o json > /tmp/{unique_hash}-releases.json")
    with open(f"/tmp/{unique_hash}-releases.json", "r") as f:
        releaseList = json.load(f)
        releases = [r["name"] for r in releaseList]
        logger.info("current installed releases: %s", releases)

    for chart in charts:
        chart_name = chart["name"].split("/")[1]
        helm_release = f"{name}-{chart_name}"
        # Do not install the chart again if it is already installed, as some charts are not upgradable.
        if helm_release not in releases:
            # install the helm package for this user space
            logger.info(
                f"install the helm package chart_name={chart_name} helm_release={helm_release}"
            )
            install_status = _install_helm_chart(
                helm_release=helm_release,
                namespace=name,
                team=team,
                user=user,
                user_email=user_email,
                user_efsapid=access_point_id,
                repo=repo,
                package=chart_name,
                logger=logger,
            )
            if install_status:
                logger.info("Helm release %s installed at %s", helm_release,
                            name)
                continue
            else:
                patch["status"] = {
                    "userSpaceOperator": {
                        "installationStatus": "Failed to install",
                        "chart_name": chart_name
                    }
                }
                return "Failed"

    logger.info("Copying PodDefaults from Team")
    logger.info("podsettings_idx:%s", podsettings_idx)

    # Construct pseudo poddefaults for each podsetting in the team namespace
    poddefaults = [
        poddefault_utils.construct(
            name=ps["name"],
            desc=ps["spec"].get("desc", ""),
            labels={
                "orbit/space": "team",
                "orbit/team": team
            },
        ) for ps in podsettings_idx.get(team, [])
    ]
    poddefault_utils.copy_poddefaults_to_user_namespaces(
        client=client,
        poddefaults=poddefaults,
        user_namespaces=[name],
        logger=logger)

    patch["metadata"] = {
        "annotations": {
            "orbit/helm-chart-installation": "Complete"
        }
    }
    patch["metadata"] = {"labels": {"userEfsApId": access_point_id}}
    patch["status"] = {
        "userSpaceOperator": {
            "installationStatus": "Installed"
        }
    }

    return "Installed"
def uninstall_team_charts(
    name: str,
    annotations: kopf.Annotations,
    labels: kopf.Labels,
    spec: kopf.Spec,
    patch: kopf.Patch,
    logger: kopf.Logger,
    meta: kopf.Meta,
    **_: Any,
) -> str:
    logger.debug("loading kubeconfig")
    load_config()

    logger.info("processing removed namespace %s", name)
    space = spec.get("space", None)

    if space == "team":
        logger.info("delete all namespaces that belong to the team %s", name)
        run_command(f"kubectl delete profile -l orbit/team={name}")
        time.sleep(60)
        run_command(
            f"kubectl delete namespace -l orbit/team={name},orbit/space=user")
        logger.info("all namespaces that belong to the team %s are deleted",
                    name)
    elif space == "user":
        env = spec.get("env", None)
        team = spec.get("team", None)
        user = spec.get("user", None)
        user_email = spec.get("userEmail", None)

        logger.debug("removed namespace: %s,%s,%s,%s", team, user, user_email,
                     name)

        if not env or not space or not team or not user or not user_email:
            logger.error(
                "All of env, space, team, user, and user_email are required. Found: %s, %s, %s, %s, %s",
                env,
                space,
                team,
                user,
                user_email,
            )
            return "Skipping"

        _delete_user_efs_endpoint(user_name=user,
                                  user_namespace=f"{team}-{user}",
                                  logger=logger,
                                  meta=meta)
        team_context = _get_team_context(team=team, logger=logger)
        logger.info("team context keys: %s", team_context.keys())
        helm_repo_url = team_context["UserHelmRepository"]
        repo = f"{team}--userspace"
        # add the team repo
        unique_hash = "".join(
            random.choice(string.ascii_lowercase) for i in range(6))
        run_command(f"helm repo add {repo} {helm_repo_url}")
        run_command(
            f"helm search repo --devel {repo} -o json > /tmp/{unique_hash}-charts.json"
        )
        with open(f"/tmp/{unique_hash}-charts.json", "r") as f:
            charts = json.load(f)
        run_command(
            f"helm list -n {team} -o json > /tmp/{unique_hash}-releases.json")
        with open(f"/tmp/{unique_hash}-releases.json", "r") as f:
            releaseList = json.load(f)
            releases = [r["name"] for r in releaseList]
            logger.info("current installed releases: %s", releases)

        for chart in charts:
            chart_name = chart["name"].split("/")[1]
            helm_release = f"{name}-{chart_name}"
            if helm_release in releases:
                install_status = _uninstall_chart(helm_release=helm_release,
                                                  namespace=team,
                                                  logger=logger)

                if install_status:
                    logger.info("Helm release %s installed at %s",
                                helm_release, name)
                    continue
                else:
                    patch["status"] = {
                        "userSpaceOperator": {
                            "installationStatus": "Failed to uninstall",
                            "chart_name": chart_name
                        }
                    }
                    return "Failed"

    patch["status"] = {
        "userSpaceOperator": {
            "installationStatus": "Uninstalled"
        }
    }
    return "Uninstalled"
def apply_settings_to_pod(
    namespace: Dict[str, Any],
    podsetting: Dict[str, Any],
    pod: Dict[str, Any],
    logger: kopf.Logger,
) -> None:
    ps_spec = podsetting["spec"]
    pod_spec = pod["spec"]

    # Merge
    if "serviceAccountName" in ps_spec:
        pod_spec["serviceAccountName"] = ps_spec.get("serviceAccountName",
                                                     None)

    # Merge
    if "labels" in ps_spec:
        pod["metadata"]["labels"] = {
            **pod["metadata"].get("labels", {}),
            **ps_spec.get("labels", {}),
        }

    # Merge
    if "annotations" in ps_spec:
        pod["metadata"]["annotations"] = {
            **pod["metadata"].get("annotations", {}),
            **ps_spec.get("annotations", {}),
        }

    # Merge
    if "nodeSelector" in ps_spec:
        pod_spec["nodeSelector"] = {
            **pod_spec.get("nodeSelector", {}),
            **ps_spec.get("nodeSelector", {}),
        }
        # There exists a bug in some k8s client libs where a / in the key is interpretted as a path.
        # With annotations and labels, replacing / with ~1 like below works and ~1 is interpretted as
        # an escaped / char. This does not work here w/ the nodeSelector keys. Keys with a / are
        # interpretted as a jsonpath, and keys with ~1 are deemed invalid.
        # pod_spec["nodeSelector"] = {k.replace("/", "~1"): v for k, v in pod_spec["nodeSelector"].items()}

        # So instead, we strip out any path from the nodeSelector keys and use a multi-label approach
        # on our ManagedNodeGroups
        pod_spec["nodeSelector"] = {
            k.split("/")[-1]: v
            for k, v in pod_spec["nodeSelector"].items()
        }
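        # e.g. (illustrative): {"orbit/node-type": "ec2"} becomes {"node-type": "ec2"}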

    # Merge
    if "securityContext" in ps_spec:
        pod_spec["securityContext"] = {
            **pod_spec.get("securityContext", {}),
            **ps_spec.get("securityContext", {}),
        }

    # Merge
    if "volumes" in ps_spec:
        # Filter out any existing volumes with names that match podsetting volumes
        pod_spec["volumes"] = [
            pv for pv in pod_spec.get("volumes", []) if pv["name"] not in
            [psv["name"] for psv in ps_spec.get("volumes", [])]
        ]
        # Extend pod volumes with podsetting volumes
        pod_spec["volumes"].extend(ps_spec.get("volumes", []))

    # Merge
    for container in filter_pod_containers(
            containers=pod_spec.get("initContainers", []),
            pod=pod,
            container_selector=ps_spec.get("containerSelector", {}),
    ):
        apply_settings_to_container(namespace=namespace,
                                    podsetting=podsetting,
                                    pod=pod,
                                    container=container)
        logger.info(
            "Applied PodSetting %s to InitContainer %s",
            podsetting["name"],
            container["name"],
        )
    for container in filter_pod_containers(
            containers=pod_spec.get("containers", []),
            pod=pod,
            container_selector=ps_spec.get("containerSelector", {}),
    ):
        apply_settings_to_container(namespace=namespace,
                                    podsetting=podsetting,
                                    pod=pod,
                                    container=container)
        logger.info(
            "Applied PodSetting %s to Container %s",
            podsetting["name"],
            container["name"],
        )
    logger.info("Applied PodSetting %s to Pod", podsetting["name"])
def update_pod_images(
    namespace: str,
    labels: kopf.Labels,
    body: kopf.Body,
    patch: kopf.Patch,
    dryrun: bool,
    logger: kopf.Logger,
    warnings: List[str],
    namespaces_idx: kopf.Index[str, Dict[str, Any]],
    podsettings_idx: kopf.Index[str, Dict[str, Any]],
    **_: Any,
) -> kopf.Patch:
    if dryrun:
        logger.debug("DryRun - Skip Pod Mutation")
        return patch

    # This is a hack to get the only namespace from the index Store
    ns: Dict[str, Any] = {}
    for ns in cast(List[Dict[str, Any]], namespaces_idx.get(namespace, [{}])):
        logger.debug("Namespace: %s", ns)

    team = ns.get("labels", {}).get("orbit/team", None)
    if not team:
        logger.info("No 'orbit/team' label found on Pod's Namespace: %s", namespace)
        # warnings.append(f"No 'orbit/team' label found on Pod's Namespace: {namespace}")
        return patch

    team_podsettings: List[Dict[str, Any]] = cast(List[Dict[str, Any]], podsettings_idx.get(team, []))
    if not team_podsettings:
        logger.info("No PodSettings found for Pod's Team: %s", team)
        # warnings.append(f"No PodSettings found for Pod's Team: {team}")
        return patch

    filtered_podsettings = podsetting_utils.filter_podsettings(
        podsettings=team_podsettings, pod_labels=labels, logger=logger
    )
    if not filtered_podsettings:
        logger.info("No PodSetting Selectors matched the Pod")
        return patch

    applied_podsetting_names = []
    body_dict = {
        "metadata": {k: v for k, v in body["metadata"].items()},
        "spec": {k: v for k, v in body["spec"].items()},
    }
    logger.debug("BodyDict: %s", body_dict)
    mutable_body = deepcopy(body)
    for podsetting in filtered_podsettings:
        try:
            podsetting_utils.apply_settings_to_pod(namespace=ns, podsetting=podsetting, pod=mutable_body, logger=logger)
            applied_podsetting_names.append(podsetting["name"])
        except Exception as e:
            logger.exception("Error applying PodSetting %s: %s", podsetting["name"], str(e))
            warnings.append(f"Error applying PodSetting {podsetting['name']}: {str(e)}")

    if body_dict["spec"] == mutable_body["spec"] and body_dict["metadata"] == mutable_body["metadata"]:
        logger.warn("PodSetting Selectors matched the Pod but no changes were applied")
        warnings.append("PodSetting Selectors matched the Pod but no changes were applied")
        return patch

    patch["metadata"] = {}
    patch["metadata"]["annotations"] = {
        **mutable_body["metadata"].get("annotations", {}),
        **{"orbit/applied-podsettings": ",".join(applied_podsetting_names)},
    }
    patch["metadata"]["annotations"] = {k.replace("/", "~1"): v for k, v in patch["metadata"]["annotations"].items()}

    if "labels" in mutable_body["metadata"]:
        patch["metadata"]["labels"] = {k.replace("/", "~1"): v for k, v in mutable_body["metadata"]["labels"].items()}

    patch["spec"] = mutable_body["spec"]

    logger.info("Applying Patch %s", patch)
    return patch
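# The namespaces_idx and podsettings_idx arguments above are kopf indexes. A minimal sketch of
# index handlers that would populate them in the shape this code expects (the resource names
# and indexed fields are assumptions; the real operator may index more data):
from typing import Any, Dict

import kopf


@kopf.index("namespaces")
def namespaces_idx(name: str, labels: kopf.Labels, **_: Any) -> Dict[str, Dict[str, Any]]:
    # Keyed by namespace name so a Pod's namespace metadata can be looked up directly.
    return {name: {"name": name, "labels": dict(labels)}}


@kopf.index("orbit.aws", "podsettings")
def podsettings_idx(namespace: str, name: str, spec: kopf.Spec, **_: Any) -> Dict[str, Dict[str, Any]]:
    # Keyed by the team namespace so PodSettings can be retrieved per team.
    return {namespace: {"name": name, "spec": dict(spec)}}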
def install_team(patch: kopf.Patch, logger: kopf.Logger, **_: Any) -> str:
    logger.info("In INSTALL_TEAM  Teamspace Controller")
    patch["status"] = {"teamspaceOperator": {"status": "Installed"}}
    return "Installed"