Example #1
def test_subhandler_imperatively(mocker):
    cause = mocker.MagicMock(event=UPDATE, diff=None)

    registry = SimpleRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass
    kopf.register(fn)

    handlers = registry.get_handlers(cause)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #2
def test_subhandler_imperatively(mocker):
    cause = mocker.MagicMock(reason=Reason.UPDATE, diff=None)

    registry = ResourceRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass

    kopf.register(fn)

    handlers = registry.get_resource_changing_handlers(cause)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #3
def test_subhandler_imperatively(parent_handler, cause_factory):
    cause = cause_factory(reason=Reason.UPDATE)

    registry = ResourceChangingRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass

    with context([(handler_var, parent_handler)]):
        kopf.register(fn)

    handlers = registry.get_handlers(cause)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #4
def test_subhandler_imperatively(mocker, parent_handler):
    cause = mocker.MagicMock(reason=Reason.UPDATE, diff=None)

    registry = ResourceChangingRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass

    with context([(handler_var, parent_handler)]):
        kopf.register(fn)

    handlers = registry.get_handlers(cause)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #5
def test_subhandler_imperatively(parent_handler, cause_factory):
    cause = cause_factory(reason=Reason.UPDATE)

    registry = SimpleRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass

    with context([(handler_var, parent_handler)]):
        kopf.register(fn)

    with pytest.deprecated_call(match=r"cease using the internal registries"):
        handlers = registry.get_cause_handlers(cause)

    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #6
def test_subhandler_imperatively(mocker):
    cause = mocker.MagicMock(reason=Reason.UPDATE, diff=None)

    registry = SimpleRegistry()
    subregistry_var.set(registry)

    def fn(**_):
        pass

    kopf.register(fn)

    with pytest.deprecated_call(
            match=r"use ResourceChangingRegistry.get_handlers\(\)"):
        handlers = registry.get_cause_handlers(cause)

    assert len(handlers) == 1
    assert handlers[0].fn is fn
Example #7
async def secret_update(
    namespace: str,
    name: str,
    diff: kopf.Diff,
    logger: logging.Logger,
    **kwargs,
):
    async with ApiClient() as api_client:
        coapi = CustomObjectsApi(api_client)
        core = CoreV1Api(api_client)

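        # For every changed key in this Secret, find the CrateDB clusters whose
        # users reference that key and register a sub-handler to update the
        # corresponding database password.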
        for operation, field_path, old_value, new_value in diff:
            custom_objects = await coapi.list_namespaced_custom_object(
                namespace=namespace,
                group=API_GROUP,
                version="v1",
                plural=RESOURCE_CRATEDB,
            )

            for crate_custom_object in custom_objects["items"]:
                host = await get_host(
                    core, namespace, crate_custom_object["metadata"]["name"]
                )

                for user_spec in crate_custom_object["spec"]["users"]:
                    expected_field_path = (
                        "data",
                        user_spec["password"]["secretKeyRef"]["key"],
                    )
                    if (
                        user_spec["password"]["secretKeyRef"]["name"] == name
                        and field_path == expected_field_path
                    ):
                        kopf.register(
                            fn=subhandler_partial(
                                update_user_password,
                                host,
                                user_spec["name"],
                                old_value,
                                new_value,
                                logger,
                            ),
                            id=f"update-{crate_custom_object['metadata']['name']}-{user_spec['name']}",  # noqa
                            timeout=config.BOOTSTRAP_TIMEOUT,
                        )
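
This and several of the later examples wrap the target coroutine in ``subhandler_partial`` before handing it to ``kopf.register``. Its definition is not part of this listing; below is a minimal sketch of what such a helper plausibly does, assuming it only pre-binds arguments and discards the keyword arguments kopf injects into every handler. It is an illustrative guess, not the actual crate-operator code.

from typing import Any, Awaitable, Callable


def subhandler_partial(
    fn: Callable[..., Awaitable[Any]], *args: Any, **kwargs: Any
) -> Callable[..., Awaitable[Any]]:
    async def _wrapper(**_kopf_kwargs: Any) -> Any:
        # Drop kopf's injected kwargs (body, meta, spec, logger, ...) and
        # call the wrapped coroutine with the pre-bound arguments instead.
        return await fn(*args, **kwargs)

    return _wrapper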
Example #8
async def handle(  # type: ignore
    self,
    namespace: str,
    name: str,
    body: kopf.Body,
    old: kopf.Body,
    logger: logging.Logger,
    **kwargs: Any,
):
    kopf.register(
        fn=subhandler_partial(self._ensure_cronjob_suspended, namespace,
                              name, logger),
        id=DISABLE_CRONJOB_HANDLER_ID,
    )
    kopf.register(
        fn=subhandler_partial(self._ensure_no_snapshots_in_progress,
                              namespace, name, logger),
        id="ensure_no_snapshots_in_progress",
    )
    kopf.register(
        fn=subhandler_partial(self._ensure_no_backup_cronjobs_running,
                              namespace, name, logger),
        id="ensure_no_cronjobs_running",
    )
Example #9
async def cluster_update(
    namespace: str,
    name: str,
    patch: kopf.Patch,
    status: kopf.Status,
    diff: kopf.Diff,
    **kwargs,
):
    """
    Handle cluster updates.

    This is done as a chain of sub-handlers that depend on the previous ones completing.
    The state of each handler is stored in the status field of the CrateDB
    custom resource. Since the status field persists between runs of this handler
    (even for unrelated runs), we calculate and store a hash of what changed as well.
    This hash is then used by the sub-handlers to work out which run they are part of.

    For example, consider this status:

    ::

        status:
          cluster_update:
            ref: 24b527bf0eada363bf548f19b98dd9cb
          cluster_update/ensure_enabled_cronjob:
            ref: 24b527bf0eada363bf548f19b98dd9cb
            success: true
          cluster_update/ensure_no_backups:
            ref: 24b527bf0eada363bf548f19b98dd9cb
            success: true
          cluster_update/scale:
            ref: 24b527bf0eada363bf548f19b98dd9cb
            success: true


    Here, ``status.cluster_update.ref`` is the hash of the last diff that was acted
    upon. Since kopf *does not clean up statuses*, at the start of a new run we check
    whether the hash matches; if it does not, any refs that are not for this run can
    be disregarded.
    """
    context = status.get(CLUSTER_UPDATE_ID)
    hash = hashlib.md5(str(diff).encode("utf-8")).hexdigest()
    if not context:
        context = {"ref": hash}
    elif context.get("ref", "") != hash:
        context["ref"] = hash

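    # Inspect the diff to decide which operations this update requires.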
    do_upgrade = False
    do_restart = False
    do_scale = False
    for _, field_path, *_ in diff:
        if field_path in {
            ("spec", "cluster", "imageRegistry"),
            ("spec", "cluster", "version"),
        }:
            do_upgrade = True
            do_restart = True
        elif field_path == ("spec", "nodes", "master", "replicas"):
            do_scale = True
        elif field_path == ("spec", "nodes", "data"):
            do_scale = True

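    # All subsequent sub-handlers depend on the ensure_no_backups check.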
    depends_on = [f"{CLUSTER_UPDATE_ID}/ensure_no_backups"]
    kopf.register(
        fn=EnsureNoBackupsSubHandler(namespace, name, hash, context)(),
        id="ensure_no_backups",
        timeout=config.SCALING_TIMEOUT,
    )

    if do_upgrade:
        kopf.register(
            fn=UpgradeSubHandler(
                namespace, name, hash, context, depends_on=depends_on.copy()
            )(),
            id="upgrade",
        )
        depends_on.append(f"{CLUSTER_UPDATE_ID}/upgrade")

    if do_restart:
        kopf.register(
            fn=RestartSubHandler(
                namespace, name, hash, context, depends_on=depends_on.copy()
            )(),
            id="restart",
            timeout=config.ROLLING_RESTART_TIMEOUT,
        )
        depends_on.append(f"{CLUSTER_UPDATE_ID}/restart")

    if do_scale:
        kopf.register(
            fn=ScaleSubHandler(
                namespace, name, hash, context, depends_on=depends_on.copy()
            )(),
            id="scale",
            timeout=config.SCALING_TIMEOUT,
        )
        depends_on.append(f"{CLUSTER_UPDATE_ID}/scale")

    kopf.register(
        fn=EnsureCronjobReenabled(
            namespace,
            name,
            hash,
            context,
            depends_on=depends_on.copy(),
            run_on_dep_failures=True,
        )(),
        id="ensure_enabled_cronjob",
    )

    patch.status[CLUSTER_UPDATE_ID] = context
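
The sub-handler classes registered above (``EnsureNoBackupsSubHandler``, ``UpgradeSubHandler``, ``RestartSubHandler``, ``ScaleSubHandler``, ``EnsureCronjobReenabled``) are not part of this listing. The sketch below shows one way such a class *could* consume the ``ref`` hash described in the docstring: the constructor parameters mirror the calls above, but the class name ``ExampleSubHandler`` and its body are illustrative assumptions, not the actual crate-operator implementation.

from typing import Any, Mapping, Optional, Sequence

import kopf


class ExampleSubHandler:
    """Hypothetical sketch of a run-aware sub-handler (not crate-operator code)."""

    def __init__(
        self,
        namespace: str,
        name: str,
        hash: str,
        context: dict,
        depends_on: Optional[Sequence[str]] = None,
        run_on_dep_failures: bool = False,
    ):
        self.namespace = namespace
        self.name = name
        self.ref = hash  # hash of the diff currently being processed
        self.context = context
        self.depends_on = list(depends_on or [])
        self.run_on_dep_failures = run_on_dep_failures

    def __call__(self):
        async def _subhandler(status: kopf.Status, **kwargs: Any):
            for dep in self.depends_on:
                dep_state: Mapping[str, Any] = status.get(dep) or {}
                # A dependency only counts if its stored ref matches the hash
                # of the current diff; older refs belong to a previous run.
                done = dep_state.get("ref") == self.ref and dep_state.get("success")
                if not done and not self.run_on_dep_failures:
                    raise kopf.TemporaryError(f"Waiting for {dep}", delay=15)
            await self.handle(**kwargs)
            # Kopf persists the returned dict under status["<parent>/<id>"],
            # which yields the layout shown in the cluster_update() docstring.
            return {"ref": self.ref, "success": True}

        return _subhandler

    async def handle(self, **kwargs: Any) -> None:
        raise NotImplementedError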
Example #10
async def cluster_create(
    namespace: str, meta: kopf.Meta, spec: kopf.Spec, logger: logging.Logger, **kwargs
):
    name = meta["name"]
    base_labels = {
        LABEL_MANAGED_BY: "crate-operator",
        LABEL_NAME: name,
        LABEL_PART_OF: "cratedb",
    }
    cratedb_labels = base_labels.copy()
    cratedb_labels[LABEL_COMPONENT] = "cratedb"
    cratedb_labels.update(meta.get("labels", {}))

    owner_references = [
        V1OwnerReference(
            api_version=f"{API_GROUP}/v1",
            block_owner_deletion=True,
            controller=True,
            kind="CrateDB",
            name=name,
            uid=meta["uid"],
        )
    ]

    image_pull_secrets = (
        [V1LocalObjectReference(name=secret) for secret in config.IMAGE_PULL_SECRETS]
        if config.IMAGE_PULL_SECRETS
        else None
    )

    ports_spec = spec.get("ports", {})
    http_port = ports_spec.get("http", Port.HTTP.value)
    jmx_port = ports_spec.get("jmx", Port.JMX.value)
    postgres_port = ports_spec.get("postgres", Port.POSTGRES.value)
    prometheus_port = ports_spec.get("prometheus", Port.PROMETHEUS.value)
    transport_port = ports_spec.get("transport", Port.TRANSPORT.value)

    master_nodes = get_master_nodes_names(spec["nodes"])
    total_nodes_count = get_total_nodes_count(spec["nodes"])
    crate_image = spec["cluster"]["imageRegistry"] + ":" + spec["cluster"]["version"]
    has_master_nodes = "master" in spec["nodes"]
    # The first StatefulSet we create references a set of master nodes. These
    # can either be explicit CrateDB master nodes, or implicit ones, which
    # would be the first set of nodes from the data nodes list.
    #
    # After the first StatefulSet was created, we set `treat_as_master` to
    # `False` to indicate that all remaining StatefulSets are neither explicit
    # nor implicit master nodes.
    treat_as_master = True
    cluster_name = spec["cluster"]["name"]

    kopf.register(
        fn=subhandler_partial(
            create_sql_exporter_config,
            owner_references,
            namespace,
            name,
            cratedb_labels,
            logger,
        ),
        id="sql_exporter_config",
    )

    kopf.register(
        fn=subhandler_partial(
            create_debug_volume,
            owner_references,
            namespace,
            name,
            cratedb_labels,
            logger,
        ),
        id="debug_volume",
    )

    kopf.register(
        fn=subhandler_partial(
            create_system_user,
            owner_references,
            namespace,
            name,
            cratedb_labels,
            logger,
        ),
        id="system_user",
    )

    kopf.register(
        fn=subhandler_partial(
            create_services,
            owner_references,
            namespace,
            name,
            cratedb_labels,
            http_port,
            postgres_port,
            transport_port,
            spec.get("cluster", {}).get("externalDNS"),
            logger,
        ),
        id="services",
    )

    if has_master_nodes:
        kopf.register(
            fn=subhandler_partial(
                create_statefulset,
                owner_references,
                namespace,
                name,
                cratedb_labels,
                treat_as_master,
                False,
                cluster_name,
                "master",
                "master-",
                spec["nodes"]["master"],
                master_nodes,
                total_nodes_count,
                http_port,
                jmx_port,
                postgres_port,
                prometheus_port,
                transport_port,
                crate_image,
                spec["cluster"].get("ssl"),
                spec["cluster"].get("settings"),
                image_pull_secrets,
                logger,
            ),
            id="statefulset_master",
        )
        treat_as_master = False

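    # One StatefulSet is created for each data node definition in the spec.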
    for node_spec in spec["nodes"]["data"]:
        node_name = node_spec["name"]
        kopf.register(
            fn=subhandler_partial(
                create_statefulset,
                owner_references,
                namespace,
                name,
                cratedb_labels,
                treat_as_master,
                True,
                cluster_name,
                node_name,
                f"data-{node_name}-",
                node_spec,
                master_nodes,
                total_nodes_count,
                http_port,
                jmx_port,
                postgres_port,
                prometheus_port,
                transport_port,
                crate_image,
                spec["cluster"].get("ssl"),
                spec["cluster"].get("settings"),
                image_pull_secrets,
                logger,
            ),
            id=f"statefulset_data_{node_name}",
        )
        treat_as_master = False

    if has_master_nodes:
        master_node_pod = f"crate-master-{name}-0"
    else:
        node_name = spec["nodes"]["data"][0]["name"]
        master_node_pod = f"crate-data-{node_name}-{name}-0"

    kopf.register(
        fn=subhandler_partial(
            bootstrap_cluster,
            namespace,
            name,
            master_node_pod,
            spec["cluster"].get("license"),
            "ssl" in spec["cluster"],
            spec.get("users"),
            logger,
        ),
        id="bootstrap",
        timeout=config.BOOTSTRAP_TIMEOUT,
    )

    if "backups" in spec:
        if config.CLUSTER_BACKUP_IMAGE is None:
            logger.info(
                "Not deploying backup tools because no backup image is defined."
            )
        else:
            backup_metrics_labels = base_labels.copy()
            backup_metrics_labels[LABEL_COMPONENT] = "backup"
            backup_metrics_labels.update(meta.get("labels", {}))
            kopf.register(
                fn=subhandler_partial(
                    create_backups,
                    owner_references,
                    namespace,
                    name,
                    backup_metrics_labels,
                    http_port,
                    prometheus_port,
                    spec["backups"],
                    image_pull_secrets,
                    "ssl" in spec["cluster"],
                    logger,
                ),
                id="backup",
            )