Example #1
async def test_conflict_logs(faker, namespace, caplog, api_client):
    caplog.set_level(logging.DEBUG, logger=__name__)
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    ns = namespace.metadata.name
    password1 = faker.password(length=12)
    password2 = faker.password(length=12)
    await core.create_namespaced_secret(
        namespace=ns,
        body=V1Secret(
            data={"password": b64encode(password1)},
            metadata=V1ObjectMeta(name=name),
            type="Opaque",
        ),
    )
    await call_kubeapi(
        core.create_namespaced_secret,
        logger,
        continue_on_conflict=True,
        namespace=ns,
        body=V1Secret(
            data={"password": b64encode(password2)},
            metadata=V1ObjectMeta(name=name),
            type="Opaque",
        ),
    )
    secret = await core.read_namespaced_secret(name=name, namespace=ns)
    assert b64decode(secret.data["password"]) == password1
    assert (
        f"Failed creating V1Secret '{ns}/{name}' because it already exists. Continuing."
        in caplog.messages
    )
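The b64encode and b64decode helpers used by these tests operate on plain strings, so they are project utilities rather than the bytes-only functions from the standard base64 module. A minimal sketch of such wrappers, assuming str-in/str-out semantics:

import base64


def b64encode(value: str) -> str:
    # Assumed helper: base64-encode a plain string, as needed for V1Secret.data.
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")


def b64decode(value: str) -> str:
    # Assumed helper: decode the base64 strings the Kubernetes API returns
    # in Secret.data back into plain text.
    return base64.b64decode(value.encode("utf-8")).decode("utf-8")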
Example #2
async def test_conflict_raises(faker, namespace, api_client):
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    ns = namespace.metadata.name
    password1 = faker.password(length=12)
    password2 = faker.password(length=12)
    await core.create_namespaced_secret(
        namespace=ns,
        body=V1Secret(
            data={"password": b64encode(password1)},
            metadata=V1ObjectMeta(name=name),
            type="Opaque",
        ),
    )
    with pytest.raises(ApiException):
        await call_kubeapi(
            core.create_namespaced_secret,
            logger,
            namespace=ns,
            body=V1Secret(
                data={"password": b64encode(password2)},
                metadata=V1ObjectMeta(name=name),
                type="Opaque",
            ),
        )
    secret = await core.read_namespaced_secret(name=name, namespace=ns)
    assert b64decode(secret.data["password"]) == password1
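call_kubeapi itself is not part of these excerpts. Together, the two tests above imply that it forwards the call to the given API method and that, with continue_on_conflict=True, a 409 Conflict is logged with the asserted "already exists" message and swallowed instead of raised. A rough sketch under those assumptions (the exact signature is a guess):

from kubernetes_asyncio.client.rest import ApiException


async def call_kubeapi(method, logger, *, continue_on_conflict=False, **kwargs):
    # Sketch only: perform the API call and optionally tolerate 409 Conflict.
    try:
        return await method(**kwargs)
    except ApiException as exc:
        if exc.status == 409 and continue_on_conflict:
            body = kwargs.get("body")
            kind = type(body).__name__ if body is not None else method.__name__
            name = getattr(getattr(body, "metadata", None), "name", None)
            logger.info(
                "Failed creating %s '%s/%s' because it already exists. "
                "Continuing.", kind, kwargs.get("namespace"), name,
            )
            return None
        raise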
Example #3
def get_backup_metrics_exporter(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    http_port: int,
    prometheus_port: int,
    backup_aws: Dict[str, Any],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    has_ssl: bool,
) -> V1Deployment:
    env = [
        V1EnvVar(name="EXPORTER_PORT", value=str(prometheus_port)),
        V1EnvVar(name="PYTHONWARNINGS",
                 value="ignore:Unverified HTTPS request"),
        V1EnvVar(name="REPOSITORY_PREFIX", value="system_backup"),
    ] + get_backup_env(name, http_port, backup_aws, has_ssl)
    return V1Deployment(
        metadata=V1ObjectMeta(
            name=f"backup-metrics-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels={
                LABEL_COMPONENT: "backup",
                LABEL_NAME: name
            }),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    annotations={
                        "prometheus.io/port": str(prometheus_port),
                        "prometheus.io/scrape": "true",
                    },
                    labels=labels,
                    name=f"backup-metrics-{name}",
                ),
                spec=V1PodSpec(
                    containers=[
                        V1Container(
                            command=["metrics-exporter", "-vv"],
                            env=env,
                            image=config.CLUSTER_BACKUP_IMAGE,
                            name="metrics-exporter",
                            ports=[
                                V1ContainerPort(
                                    container_port=prometheus_port,
                                    name="backup-metrics",
                                )
                            ],
                        )
                    ],
                    image_pull_secrets=image_pull_secrets,
                    restart_policy="Always",
                ),
            ),
        ),
    )
Example #4
    def _build_secret_for_service_token(self, parent: GafaelfawrServiceToken,
                                        token: Token) -> V1Secret:
        """Construct a new Secret object.

        Parameters
        ----------
        parent : `GafaelfawrServiceToken`
            The parent GafaelfawrServiceToken object.
        token : `gafaelfawr.models.token.Token`
            The Gafaelfawr token to store in the secret.
        """
        return V1Secret(
            api_version="v1",
            kind="Secret",
            data={"token": self._encode_token(token)},
            metadata=V1ObjectMeta(
                name=parent.name,
                namespace=parent.namespace,
                annotations=parent.annotations,
                labels=parent.labels,
                owner_references=[
                    V1OwnerReference(
                        api_version="gafaelfawr.lsst.io/v1alpha1",
                        block_owner_deletion=True,
                        controller=True,
                        kind="GafaelfawrServiceToken",
                        name=parent.name,
                        uid=parent.uid,
                    ),
                ],
            ),
            type="Opaque",
        )
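self._encode_token() is not shown; given that the Secret stores the token base64-encoded under the "token" key (and that later examples use a token_as_base64 test helper for the same purpose), a plausible stand-alone equivalent, assuming str(token) yields the serialized token, is:

from base64 import b64encode


def encode_token(token) -> str:
    # Assumed equivalent of self._encode_token() / token_as_base64():
    # Kubernetes Secret data values must be base64-encoded strings.
    return b64encode(str(token).encode("utf-8")).decode("utf-8")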
Example #5
    def _make_pod(
        self,
        username: str,
        volumes: List[Dict[str, Any]],
        containers: List[Dict[str, Any]],
        dossier: Dossier,
        pull_secret_name: Optional[str] = None,
    ) -> V1Pod:
        spec = self._make_pod_spec(
            username=username,
            volumes=volumes,
            containers=containers,
            dossier=dossier,
            pull_secret_name=pull_secret_name,
        )
        pname = _name_object(username, "pod")
        md = V1ObjectMeta(
            name=pname,
            namespace=self.namespace,
            owner_references=[
                V1OwnerReference(
                    api_version="v1",
                    kind="Pod",
                    name=read_pod_info("name"),
                    uid=read_pod_info("uid"),
                )
            ],
        )
        pod = V1Pod(metadata=md, spec=spec)
        return pod
Example #6
    def _create_dossier_configmap(self, dossier: Dossier) -> V1ConfigMap:
        """Build the configmap containing the dossier that will be
        mounted to the working container.  Dossier will be in JSON
        format, purely because Python includes a json parser but not
        a yaml parser in its standard library.
        """
        cmname = _name_object(dossier.username, "cm")
        djson = json.dumps(dossier.dict(), sort_keys=True, indent=4)
        data = {"dossier.json": djson}
        cm = V1ConfigMap(
            metadata=V1ObjectMeta(
                name=cmname,
                namespace=self.namespace,
                owner_references=[
                    V1OwnerReference(
                        api_version="v1",
                        kind="Pod",
                        name=read_pod_info("name"),
                        uid=read_pod_info("uid"),
                    )
                ],
            ),
            data=data,
        )
        return cm
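_name_object() and read_pod_info() are helpers defined outside these excerpts. Example #24 expects objects named "<username>-cm" and "<username>-pod" with owner references pointing at the service's own pod, so plausible sketches look like the following (the downward-API mount path is an assumption):

def _name_object(username: str, kind: str) -> str:
    # Matches the names asserted in Example #24, e.g. "someuser-pod".
    return f"{username}-{kind}"


def read_pod_info(field: str) -> str:
    # Assumed: the running pod's own name/uid are exposed via a downward-API
    # volume; the exact mount path is a guess.
    with open(f"/etc/podinfo/{field}") as f:
        return f.read().strip()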
Example #7
def get_sql_exporter_config(owner_references: Optional[List[V1OwnerReference]],
                            name: str, labels: LabelType) -> V1ConfigMap:
    sql_exporter_config = pkgutil.get_data("crate.operator",
                                           "data/sql-exporter.yaml")
    responsivity_collector_config = pkgutil.get_data(
        "crate.operator",
        "data/responsivity-collector.yaml",
    )
    if sql_exporter_config and responsivity_collector_config:
        return V1ConfigMap(
            metadata=V1ObjectMeta(
                name=f"crate-sql-exporter-{name}",
                labels=labels,
                owner_references=owner_references,
            ),
            data={
                "sql-exporter.yaml":
                sql_exporter_config.decode(),
                "responsivity-collector.yaml":
                responsivity_collector_config.decode(),
            },
        )
    else:
        warnings.warn(
            "Cannot load or missing SQL Exporter or Responsivity Collector config!"
        )
Example #8
async def assert_kubernetes_secrets_are_correct(
    factory: ComponentFactory, mock: MockKubernetesApi, is_fresh: bool = True
) -> None:
    token_service = factory.create_token_service()

    # Get all of the GafaelfawrServiceToken custom objects.
    service_tokens = mock.get_all_objects_for_test("GafaelfawrServiceToken")

    # Calculate the expected secrets.
    expected = [
        V1Secret(
            api_version="v1",
            kind="Secret",
            data={"token": ANY},
            metadata=V1ObjectMeta(
                name=t["metadata"]["name"],
                namespace=t["metadata"]["namespace"],
                annotations=t["metadata"].get("annotations", {}),
                labels=t["metadata"].get("labels", {}),
                owner_references=[
                    V1OwnerReference(
                        api_version="gafaelfawr.lsst.io/v1alpha1",
                        block_owner_deletion=True,
                        controller=True,
                        kind="GafaelfawrServiceToken",
                        name=t["metadata"]["name"],
                        uid=t["metadata"]["uid"],
                    ),
                ],
            ),
            type="Opaque",
        )
        for t in service_tokens
    ]
    expected = sorted(
        expected, key=lambda o: (o.metadata.namespace, o.metadata.name)
    )
    assert mock.get_all_objects_for_test("Secret") == expected

    # Now check that every token in those secrets is correct.
    for service_token in service_tokens:
        name = service_token["metadata"]["name"]
        namespace = service_token["metadata"]["namespace"]
        secret = await mock.read_namespaced_secret(name, namespace)
        data = await token_data_from_secret(token_service, secret)
        assert data == TokenData(
            token=data.token,
            username=service_token["spec"]["service"],
            token_type=TokenType.service,
            scopes=service_token["spec"]["scopes"],
            created=data.created,
            expires=None,
            name=None,
            uid=None,
            groups=None,
        )
        if is_fresh:
            now = current_datetime()
            assert now - timedelta(seconds=5) <= data.created <= now
Example #9
async def namespace(faker, api_client) -> V1Namespace:
    core = CoreV1Api(api_client)
    name = faker.uuid4()
    await assert_wait_for(False, does_namespace_exist, core, name)
    ns: V1Namespace = await core.create_namespace(body=V1Namespace(
        metadata=V1ObjectMeta(name=name)))
    await assert_wait_for(True, does_namespace_exist, core, name)
    yield ns
    await core.delete_namespace(name=ns.metadata.name, body=V1DeleteOptions())
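assert_wait_for and does_namespace_exist are test utilities; their call sites suggest an async predicate plus a poller that retries until the predicate returns the expected value or a timeout expires. A minimal sketch under those assumptions:

import asyncio

from kubernetes_asyncio.client import CoreV1Api
from kubernetes_asyncio.client.rest import ApiException


async def does_namespace_exist(core: CoreV1Api, namespace: str) -> bool:
    # Assumed predicate: true when the namespace can be read.
    try:
        await core.read_namespace(namespace)
        return True
    except ApiException as exc:
        if exc.status == 404:
            return False
        raise


async def assert_wait_for(expected, coro_fn, *args, timeout=30, delay=1):
    # Assumed poller: re-evaluate the predicate until it returns `expected`
    # or the timeout elapses.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while await coro_fn(*args) != expected:
        assert loop.time() < deadline, (
            f"{coro_fn.__name__} did not return {expected!r} within {timeout}s")
        await asyncio.sleep(delay)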
Example #10
async def test_mock(mock_kubernetes: MockKubernetesApi) -> None:
    custom: Dict[str, Any] = {
        "apiVersion": "gafaelfawr.lsst.io/v1alpha1",
        "kind": "GafaelfawrServiceToken",
        "metadata": {
            "name": "gafaelfawr-secret",
            "namespace": "mobu",
            "generation": 1,
        },
        "spec": {
            "service": "mobu",
            "scopes": ["admin:token"],
        },
    }
    secret = V1Secret(
        api_version="v1",
        kind="Secret",
        data={"token": "bogus"},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.create_namespaced_custom_object(
        "gafaelfawr.lsst.io",
        "v1alpha1",
        custom["metadata"]["namespace"],
        "gafaelfawrservicetokens",
        custom,
    )
    await mock_kubernetes.create_namespaced_secret(secret.metadata.namespace,
                                                   secret)

    assert await mock_kubernetes.list_cluster_custom_object(
        "gafaelfawr.lsst.io", "v1alpha1", "gafaelfawrservicetokens") == {
            "items": [{
                **custom, "metadata": {
                    **custom["metadata"], "uid": ANY
                }
            }]
        }
    assert mock_kubernetes.get_all_objects_for_test("Secret") == [secret]

    def error(method: str, *args: Any) -> None:
        assert method == "replace_namespaced_custom_object"
        raise ValueError("some exception")

    mock_kubernetes.error_callback = error
    with pytest.raises(ValueError) as excinfo:
        await mock_kubernetes.replace_namespaced_custom_object(
            "gafaelfawr.lsst.io",
            "v1alpha1",
            custom["metadata"]["namespace"],
            "gafaelfawrservicetokens",
            "gafaelfawr-secret",
            custom,
        )
    assert str(excinfo.value) == "some exception"
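MockKubernetesApi is not reproduced here, but the error_callback hook it exposes follows a simple pattern: each mocked method first hands the operation name and its arguments to a test-provided callable, which may raise to inject a failure (as in Example #29). An illustrative, deliberately minimal mock, not the real class:

from typing import Any, Callable, Optional


class MockApiWithErrorHook:
    # Illustration of the error_callback pattern only; the real
    # MockKubernetesApi also stores and serves objects.
    def __init__(self) -> None:
        self.error_callback: Optional[Callable[..., None]] = None

    def _maybe_error(self, method: str, *args: Any) -> None:
        if self.error_callback:
            self.error_callback(method, *args)

    async def replace_namespaced_custom_object(
        self, group: str, version: str, namespace: str, plural: str,
        name: str, body: Any,
    ) -> Any:
        self._maybe_error("replace_namespaced_custom_object", group, version,
                          namespace, plural, name, body)
        return body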
Example #11
def get_system_user_secret(owner_references: Optional[List[V1OwnerReference]],
                           name: str, labels: LabelType) -> V1Secret:
    return V1Secret(
        data={"password": b64encode(gen_password(50))},
        metadata=V1ObjectMeta(
            name=f"user-system-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        type="Opaque",
    )
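gen_password() is assumed to produce a random password of the requested length; a straightforward sketch using the standard secrets module:

import secrets
import string


def gen_password(length: int) -> str:
    # Assumed helper: random alphanumeric password of `length` characters.
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))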
Example #12
def get_data_service(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    http_port: int,
    postgres_port: int,
    dns_record: Optional[str],
) -> V1Service:
    annotations = {}
    if config.CLOUD_PROVIDER == CloudProvider.AWS:
        # https://kubernetes.io/docs/concepts/services-networking/service/#connection-draining-on-aws
        annotations.update({
            "service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled":
            "true",  # noqa
            "service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout":
            "1800",  # noqa
        })
    elif config.CLOUD_PROVIDER == CloudProvider.AZURE:
        # https://docs.microsoft.com/en-us/azure/aks/load-balancer-standard#additional-customizations-via-kubernetes-annotations
        # https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-tcp-reset
        annotations.update({
            "service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset":
            "false",  # noqa
            "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout":
            "30",  # noqa
        })

    if dns_record:
        annotations.update(
            {"external-dns.alpha.kubernetes.io/hostname": dns_record})

    return V1Service(
        metadata=V1ObjectMeta(
            annotations=annotations,
            labels=labels,
            name=f"crate-{name}",
            owner_references=owner_references,
        ),
        spec=V1ServiceSpec(
            ports=[
                V1ServicePort(name="http", port=http_port, target_port=4200),
                V1ServicePort(name="psql",
                              port=postgres_port,
                              target_port=5432),
            ],
            selector={
                LABEL_COMPONENT: "cratedb",
                LABEL_NAME: name
            },
            type="LoadBalancer",
            external_traffic_policy="Local",
        ),
    )
Example #13
def get_statefulset_pvc(
        owner_references: Optional[List[V1OwnerReference]],
        node_spec: Dict[str, Any]) -> List[V1PersistentVolumeClaim]:
    size = format_bitmath(
        bitmath.parse_string_unsafe(node_spec["resources"]["disk"]["size"]))
    storage_class_name = node_spec["resources"]["disk"]["storageClass"]
    return [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name=f"data{i}",
                                  owner_references=owner_references),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": size}),
                storage_class_name=storage_class_name,
            ),
        ) for i in range(node_spec["resources"]["disk"]["count"])
    ]
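format_bitmath() is not included in these excerpts; it has to turn a bitmath size (here parsed from strings such as "16GiB") into something Kubernetes accepts as a resource quantity. A conservative sketch, assuming a plain byte count is acceptable (the real helper may emit binary suffixes such as "16Gi" instead):

import bitmath


def format_bitmath(value) -> str:
    # Assumed: `value` is a bitmath size, e.g. bitmath.parse_string_unsafe("16GiB");
    # an integer number of bytes is a valid Kubernetes quantity.
    return str(int(value.to_Byte().value))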
Example #14
def get_debug_persistent_volume_claim(
        owner_references: Optional[List[V1OwnerReference]], name: str,
        labels: LabelType) -> V1PersistentVolumeClaim:
    return V1PersistentVolumeClaim(
        metadata=V1ObjectMeta(
            name=f"local-resource-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=V1ResourceRequirements(
                requests={"storage": format_bitmath(config.DEBUG_VOLUME_SIZE)
                          }),
            storage_class_name=config.DEBUG_VOLUME_STORAGE_CLASS,
        ),
    )
Example #15
async def test_success(faker, namespace, api_client):
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    password = faker.password(length=12)
    await call_kubeapi(
        core.create_namespaced_secret,
        logger,
        namespace=namespace.metadata.name,
        body=V1Secret(
            data={"password": b64encode(password)},
            metadata=V1ObjectMeta(name=name),
            type="Opaque",
        ),
    )
    secret = await core.read_namespaced_secret(
        name=name, namespace=namespace.metadata.name
    )
    assert b64decode(secret.data["password"]) == password
Example #16
def get_sql_exporter_config(owner_references: Optional[List[V1OwnerReference]],
                            name: str, labels: LabelType) -> V1ConfigMap:

    sql_exporter_config = pkgutil.get_data("crate.operator",
                                           "data/sql-exporter.yaml")

    if sql_exporter_config:
        # Parse the config yaml file to get the defined collectors and load them
        parsed_sql_exporter_config = yaml.safe_load(sql_exporter_config.decode())
        collectors = parsed_sql_exporter_config["target"]["collectors"]

        result = V1ConfigMap(
            metadata=V1ObjectMeta(
                name=f"crate-sql-exporter-{name}",
                labels=labels,
                owner_references=owner_references,
            ),
            data={
                "sql-exporter.yaml": sql_exporter_config.decode(),
            },
        )

        # Add the yaml collectors to the configmap dynamically
        for collector in collectors:
            # Remove the `_collector` suffix from the collector name if present
            if collector.endswith("_collector"):
                collector = collector[:-10]
            yaml_filename = (
                f"{collector}-collector.yaml"  # Notice the `-` instead of `_`!
            )
            collector_config = pkgutil.get_data("crate.operator",
                                                f"data/{yaml_filename}")

            if collector_config is None:
                raise FileNotFoundError(
                    f"Could not load config for collector {collector}")
            result.data[yaml_filename] = collector_config.decode()

        return result
    else:
        warnings.warn(
            "Cannot load or missing SQL Exporter or Responsivity Collector config!"
        )
Example #17
async def ensure_user_password_label(core: CoreV1Api, namespace: str,
                                     secret_name: str):
    """
    Add the LABEL_USER_PASSWORD label to a namespaced secret.

    The label marks the secret as one that contains a user password, so the
    operator can identify such secrets and react when their contents change.

    :param core: An instance of the Kubernetes Core V1 API.
    :param namespace: The namespace where the Kubernetes Secret is deployed.
    :param secret_name: The name of the Kubernetes Secret.
    """
    await core.patch_namespaced_secret(
        namespace=namespace,
        name=secret_name,
        body=V1Secret(metadata=V1ObjectMeta(
            labels={LABEL_USER_PASSWORD: "******"}, ), ),
    )
Example #18
def get_discovery_service(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    transport_port: int,
) -> V1Service:
    return V1Service(
        metadata=V1ObjectMeta(
            name=f"crate-discovery-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1ServiceSpec(
            ports=[V1ServicePort(name="cluster", port=transport_port)],
            selector={
                LABEL_COMPONENT: "cratedb",
                LABEL_NAME: name
            },
        ),
    )
Example #19
def get_debug_persistent_volume(
    owner_references: Optional[List[V1OwnerReference]],
    namespace: str,
    name: str,
    labels: LabelType,
) -> V1PersistentVolume:
    return V1PersistentVolume(
        metadata=V1ObjectMeta(
            name=f"temp-pv-{namespace}-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1PersistentVolumeSpec(
            access_modes=["ReadWriteOnce"],
            capacity={"storage": format_bitmath(config.DEBUG_VOLUME_SIZE)},
            host_path=V1HostPathVolumeSource(
                path=f"/mnt/resource/{namespace}-{name}"),
            storage_class_name=config.DEBUG_VOLUME_STORAGE_CLASS,
        ),
    )
Example #20
async def test_resume_set_secret_labels(
    faker,
    namespace,
    cleanup_handler,
    kopf_runner,
    api_client,
):
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    password1 = faker.password(length=40)
    password2 = faker.password(length=40)

    cleanup_handler.append(
        core.delete_persistent_volume(
            name=f"temp-pv-{namespace.metadata.name}-{name}"))

    await asyncio.gather(
        core.create_namespaced_secret(
            namespace=namespace.metadata.name,
            body=V1Secret(
                data={"password": b64encode(password1)},
                metadata=V1ObjectMeta(name=f"user-{name}-1"),
                type="Opaque",
            ),
        ),
        core.create_namespaced_secret(
            namespace=namespace.metadata.name,
            body=V1Secret(
                data={"password": b64encode(password2)},
                metadata=V1ObjectMeta(name=f"user-{name}-2",
                                      labels={LABEL_USER_PASSWORD: "******"}),
                type="Opaque",
            ),
        ),
    )

    secret_1 = await core.read_namespaced_secret(
        name=f"user-{name}-1", namespace=namespace.metadata.name)
    secret_2 = await core.read_namespaced_secret(
        name=f"user-{name}-2", namespace=namespace.metadata.name)

    assert secret_1.metadata.labels is None
    assert LABEL_USER_PASSWORD in secret_2.metadata.labels

    await update_cratedb_resource(
        namespace=namespace.metadata.name,
        name=f"user-{name}",
        spec={
            "cluster": {
                "imageRegistry": "crate",
                "name": "my-crate-cluster",
                "version": CRATE_VERSION,
            },
            "nodes": {
                "data": [{
                    "name": "data",
                    "replicas": 1,
                    "resources": {
                        "cpus": 0.5,
                        "disk": {
                            "count": 1,
                            "size": "16GiB",
                            "storageClass": "default",
                        },
                        "heapRatio": 0.25,
                        "memory": "1Gi",
                    },
                }]
            },
            "users": [
                {
                    "name": name,
                    "password": {
                        "secretKeyRef": {
                            "key": b64encode(password1),
                            "name": f"user-{name}-1",
                        }
                    },
                },
                {
                    "name": name,
                    "password": {
                        "secretKeyRef": {
                            "key": b64encode(password1),
                            "name": f"user-{name}-2",
                        }
                    },
                },
            ],
        },
    )

    secret_1_after_resume = await core.read_namespaced_secret(
        name=f"user-{name}-1", namespace=namespace.metadata.name)
    secret_2_after_resume = await core.read_namespaced_secret(
        name=f"user-{name}-2", namespace=namespace.metadata.name)

    assert LABEL_USER_PASSWORD in secret_1_after_resume.metadata.labels
    assert LABEL_USER_PASSWORD in secret_2_after_resume.metadata.labels
Example #21
async def test_create_kubernetes_resources(
    kubernetes_api_mock: KubernetesApiMock, ) -> None:
    spawner = Mock(spec=KubeSpawner)
    spawner.k8s_api_request_timeout = 3
    spawner.k8s_api_request_retry_timeout = 30
    spawner.namespace = "nublado2-someuser"
    spawner.extra_annotations = {
        "argocd.argoproj.io/compare-options": "IgnoreExtraneous",
        "argocd.argoproj.io/sync-options": "Prune=false",
    }
    spawner.extra_labels = {
        "hub.jupyter.org/network-access-hub": "true",
        "argocd.argoproj.io/instance": "nublado-users",
    }
    spawner._make_create_resource_request = kubernetes_api_mock.create_object
    spawner.hub = Mock()
    spawner.hub.base_url = "/nb/hub/"
    spawner.user = Mock(spec=User)
    spawner.user.name = "someuser"
    spawner.api = kubernetes_api_mock
    auth_state = {
        "token": "user-token",
        "uid": 1234,
        "groups": [{
            "name": "foo",
            "id": 1235
        }, {
            "name": "bar",
            "id": 4567
        }],
    }
    pod_manifest = V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=V1ObjectMeta(
            name="user-pod",
            namespace=spawner.namespace,
        ),
        spec=V1PodSpec(containers=[
            V1Container(
                name="container",
                command=["run-something"],
                env=[V1EnvVar(name="FOO", value="BAR")],
                image="blah:latest",
            )
        ], ),
    )
    if sys.version_info < (3, 8):
        spawner.get_pod_manifest.return_value = asyncio.Future()
        spawner.get_pod_manifest.return_value.set_result(pod_manifest)
        spawner.user.get_auth_state.return_value = asyncio.Future()
        spawner.user.get_auth_state.return_value.set_result(auth_state)
    else:
        spawner.get_pod_manifest.return_value = pod_manifest
        spawner.user.get_auth_state.return_value = auth_state

    options = Mock(spec=SelectedOptions)
    options.debug = "true"
    options.clear_dotlocal = "true"
    options.image_info = ImageInfo(
        reference="registry.hub.docker.com/lsstsqre/sciplat-lab:w_2021_13",
        display_name="blah blah blah",
        digest="sha256:123456789abcdef",
    )

    resource_manager = ResourceManager()
    await resource_manager._create_kubernetes_resources(spawner, options)

    assert sorted(
        kubernetes_api_mock.objects,
        key=lambda o: (o["kind"], o["metadata"]["name"]),
    ) == [
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "dask",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                "dask_worker.yml":
                f"""\
apiVersion: v1
kind: Pod
metadata:
  namespace: {spawner.namespace}
spec:
  containers:
    - command:
        - run-something
      env:
        - name: FOO
          value: BAR
        - name: DASK_WORKER
          value: 'TRUE'
      image: blah:latest
      name: container
"""
            },
        },
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "group",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                "group": ("someuser:x:1234:\n"
                          "foo:x:1235:someuser\n"
                          "bar:x:4567:someuser\n")
            },
        },
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "lab-environment",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                "EXTERNAL_INSTANCE_URL": "https://data.example.com/",
                "FIREFLY_ROUTE": "/portal/app",
                "HUB_ROUTE": "/nb/hub/",
                "EXTERNAL_GROUPS": "foo:1235,bar:4567",
                "EXTERNAL_UID": "1234",
                "ACCESS_TOKEN": "user-token",
                "IMAGE_DIGEST": "sha256:123456789abcdef",
                "IMAGE_DESCRIPTION": "blah blah blah",
                "CLEAR_DOTLOCAL": "true",
                "DEBUG": "true",
            },
        },
    ]

    assert sorted(
        kubernetes_api_mock.custom,
        key=lambda o: (o["kind"], o["metadata"]["name"]),
    ) == [{
        "apiVersion": "ricoberger.de/v1alpha1",
        "kind": "VaultSecret",
        "metadata": {
            "name": "butler-secret",
            "namespace": spawner.namespace,
            "annotations": spawner.extra_annotations,
            "labels": spawner.extra_labels,
        },
        "spec": {
            "path": "k8s_operator/data/butler",
            "type": "Opaque",
        },
    }]
Example #22
def get_backup_cronjob(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    http_port: int,
    backup_aws: Dict[str, Any],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    has_ssl: bool,
) -> V1beta1CronJob:
    env = [
        V1EnvVar(
            name="AWS_ACCESS_KEY_ID",
            value_from=V1EnvVarSource(
                secret_key_ref=V1SecretKeySelector(
                    key=backup_aws["accessKeyId"]["secretKeyRef"]["key"],
                    name=backup_aws["accessKeyId"]["secretKeyRef"]["name"],
                ),
            ),
        ),
        V1EnvVar(
            name="AWS_SECRET_ACCESS_KEY",
            value_from=V1EnvVarSource(
                secret_key_ref=V1SecretKeySelector(
                    key=backup_aws["secretAccessKey"]["secretKeyRef"]["key"],
                    name=backup_aws["secretAccessKey"]["secretKeyRef"]["name"],
                ),
            ),
        ),
        V1EnvVar(name="CLUSTER_ID", value=name),
        V1EnvVar(name="PYTHONWARNINGS", value="ignore:Unverified HTTPS request"),
        V1EnvVar(name="REPOSITORY_PREFIX", value="system_backup"),
    ] + get_backup_env(name, http_port, backup_aws, has_ssl)
    return V1beta1CronJob(
        metadata=V1ObjectMeta(
            name=f"create-snapshot-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1beta1CronJobSpec(
            concurrency_policy="Forbid",
            failed_jobs_history_limit=1,
            job_template=V1beta1JobTemplateSpec(
                metadata=V1ObjectMeta(labels=labels, name=f"create-snapshot-{name}"),
                spec=V1JobSpec(
                    template=V1PodTemplateSpec(
                        metadata=V1ObjectMeta(
                            labels=labels, name=f"create-snapshot-{name}",
                        ),
                        spec=V1PodSpec(
                            containers=[
                                V1Container(
                                    command=["backup", "-vv"],
                                    env=env,
                                    image=config.CLUSTER_BACKUP_IMAGE,
                                    name="backup",
                                )
                            ],
                            image_pull_secrets=image_pull_secrets,
                            restart_policy="Never",
                        ),
                    ),
                ),
            ),
            schedule=backup_aws["cron"],
            successful_jobs_history_limit=1,
        ),
    )
Example #23
def get_statefulset(
    owner_references: Optional[List[V1OwnerReference]],
    namespace: str,
    name: str,
    labels: LabelType,
    treat_as_master: bool,
    treat_as_data: bool,
    cluster_name: str,
    node_name: str,
    node_name_prefix: str,
    node_spec: Dict[str, Any],
    master_nodes: List[str],
    total_nodes_count: int,
    http_port: int,
    jmx_port: int,
    postgres_port: int,
    prometheus_port: int,
    transport_port: int,
    crate_image: str,
    ssl: Optional[Dict[str, Any]],
    cluster_settings: Optional[Dict[str, str]],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    logger: logging.Logger,
) -> V1StatefulSet:
    node_annotations = node_spec.get("annotations", {})
    node_annotations.update({
        "prometheus.io/port": str(prometheus_port),
        "prometheus.io/scrape": "true"
    })
    node_labels = labels.copy()
    node_labels.update(node_spec.get("labels", {}))
    # This is to identify pods of the same cluster but with a different node type
    node_labels[LABEL_NODE_NAME] = node_name
    full_pod_name_prefix = f"crate-{node_name_prefix}{name}"

    containers = get_statefulset_containers(
        node_spec,
        http_port,
        jmx_port,
        postgres_port,
        prometheus_port,
        transport_port,
        crate_image,
        get_statefulset_crate_command(
            namespace=namespace,
            name=name,
            master_nodes=master_nodes,
            total_nodes_count=total_nodes_count,
            crate_node_name_prefix=node_name_prefix,
            cluster_name=cluster_name,
            node_name=node_name,
            node_spec=node_spec,
            cluster_settings=cluster_settings,
            has_ssl=bool(ssl),
            is_master=treat_as_master,
            is_data=treat_as_data,
        ),
        get_statefulset_crate_env(node_spec, jmx_port, prometheus_port, ssl),
        get_statefulset_crate_volume_mounts(node_spec, ssl),
    )

    return V1StatefulSet(
        metadata=V1ObjectMeta(
            annotations=node_spec.get("annotations"),
            labels=node_labels,
            name=full_pod_name_prefix,
            owner_references=owner_references,
        ),
        spec=V1StatefulSetSpec(
            pod_management_policy="Parallel",
            replicas=node_spec["replicas"],
            selector=V1LabelSelector(
                match_labels={
                    LABEL_COMPONENT: "cratedb",
                    LABEL_NAME: name,
                    LABEL_NODE_NAME: node_name,
                }),
            service_name="cratedb",
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    annotations=node_annotations,
                    labels=node_labels,
                ),
                spec=V1PodSpec(
                    affinity=get_statefulset_affinity(name, logger),
                    containers=containers,
                    image_pull_secrets=image_pull_secrets,
                    init_containers=get_statefulset_init_containers(
                        crate_image),
                    volumes=get_statefulset_volumes(name, ssl),
                ),
            ),
            update_strategy=V1StatefulSetUpdateStrategy(type="OnDelete"),
            volume_claim_templates=get_statefulset_pvc(owner_references,
                                                       node_spec),
        ),
    )
Example #24
async def test_route_commission(
    client: AsyncClient,
    dossier: Dossier,
    mock_kubernetes: MockKubernetesApi,
    mock_kubernetes_watch: MockKubernetesWatch,
) -> None:
    r = await client.post("/moneypenny/users", json=dossier.dict())
    assert r.status_code == 303
    assert r.headers["Location"] == url_for(f"users/{dossier.username}")

    r = await client.get(f"/moneypenny/users/{dossier.username}")
    assert r.status_code == 200
    data = r.json()
    assert data == {
        "username": dossier.username,
        "status": "commissioning",
        "last_changed": ANY,
        "uid": dossier.uid,
        "groups": [g.dict() for g in dossier.groups],
    }

    # Requesting the exact same thing again even though it's not complete is
    # fine and produces the same redirect.
    r = await client.post("/moneypenny/users", json=dossier.dict())
    assert r.status_code == 303
    assert r.headers["Location"] == url_for(f"users/{dossier.username}")

    assert mock_kubernetes.get_all_objects_for_test("ConfigMap") == [
        V1ConfigMap(
            metadata=V1ObjectMeta(
                name=f"{dossier.username}-cm",
                namespace="default",
                owner_references=[
                    V1OwnerReference(
                        api_version="v1",
                        kind="Pod",
                        name="moneypenny-78547dcf97-9xqq8",
                        uid="00386592-214f-40c5-88e1-b9657d53a7c6",
                    )
                ],
            ),
            data={
                "dossier.json":
                json.dumps(dossier.dict(), sort_keys=True, indent=4)
            },
        )
    ]
    assert mock_kubernetes.get_all_objects_for_test("Pod") == [
        V1Pod(
            metadata=V1ObjectMeta(
                name=f"{dossier.username}-pod",
                namespace="default",
                owner_references=[
                    V1OwnerReference(
                        api_version="v1",
                        kind="Pod",
                        name="moneypenny-78547dcf97-9xqq8",
                        uid="00386592-214f-40c5-88e1-b9657d53a7c6",
                    )
                ],
            ),
            spec=V1PodSpec(
                automount_service_account_token=False,
                containers=[{
                    "name":
                    "farthing",
                    "image":
                    "lsstsqre/farthing",
                    "securityContext": {
                        "runAsUser": 1000,
                        "runAsNonRootUser": True,
                        "allowPrivilegeEscalation": False,
                    },
                    "volumeMounts": [
                        {
                            "mountPath": "/homedirs",
                            "name": "homedirs",
                        },
                        {
                            "mountPath": "/opt/dossier",
                            "name": f"dossier-{dossier.username}-vol",
                            "readOnly": True,
                        },
                    ],
                }],
                image_pull_secrets=[],
                init_containers=[],
                restart_policy="OnFailure",
                security_context=V1PodSecurityContext(run_as_group=1000,
                                                      run_as_user=1000),
                volumes=[
                    {
                        "name": "homedirs",
                        "nfs": {
                            "server": "10.10.10.10",
                            "path": "/homedirs",
                        },
                    },
                    V1Volume(
                        name=f"dossier-{dossier.username}-vol",
                        config_map=V1ConfigMapVolumeSource(
                            default_mode=0o644,
                            name=f"{dossier.username}-cm",
                        ),
                    ),
                ],
            ),
            status=V1PodStatus(phase="Running"),
        )
    ]

    await wait_for_completion(client, dossier.username, mock_kubernetes,
                              mock_kubernetes_watch)
Example #25
async def create_statefulset(
    owner_references: Optional[List[V1OwnerReference]],
    namespace: str,
    name: str,
    labels: LabelType,
    treat_as_master: bool,
    treat_as_data: bool,
    cluster_name: str,
    node_name: str,
    node_name_prefix: str,
    node_spec: Dict[str, Any],
    master_nodes: List[str],
    total_nodes_count: int,
    http_port: int,
    jmx_port: int,
    postgres_port: int,
    prometheus_port: int,
    transport_port: int,
    crate_image: str,
    ssl: Optional[Dict[str, Any]],
    cluster_settings: Optional[Dict[str, str]],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    logger: logging.Logger,
) -> None:
    async with ApiClient() as api_client:
        apps = AppsV1Api(api_client)
        await call_kubeapi(
            apps.create_namespaced_stateful_set,
            logger,
            continue_on_conflict=True,
            namespace=namespace,
            body=get_statefulset(
                owner_references,
                namespace,
                name,
                labels,
                treat_as_master,
                treat_as_data,
                cluster_name,
                node_name,
                node_name_prefix,
                node_spec,
                master_nodes,
                total_nodes_count,
                http_port,
                jmx_port,
                postgres_port,
                prometheus_port,
                transport_port,
                crate_image,
                ssl,
                cluster_settings,
                image_pull_secrets,
                logger,
            ),
        )
        policy = PolicyV1beta1Api(api_client)
        pdb = V1beta1PodDisruptionBudget(
            metadata=V1ObjectMeta(
                name=f"crate-{name}",
                owner_references=owner_references,
            ),
            spec=V1beta1PodDisruptionBudgetSpec(
                max_unavailable=1,
                selector=V1LabelSelector(
                    match_labels={
                        LABEL_COMPONENT: "cratedb",
                        LABEL_NAME: name,
                        LABEL_NODE_NAME: node_name,
                    }),
            ),
        )
        """
           A Pod Distruption Budget ensures that when performing Kubernetes cluster
           maintenance (i.e. upgrades), we make sure to not disrupt more than
           1 pod in a StatefulSet at a time.
        """
        await call_kubeapi(
            policy.create_namespaced_pod_disruption_budget,
            logger,
            continue_on_conflict=True,
            namespace=namespace,
            body=pdb,
        )
Example #26
async def test_bootstrap_users(
    bootstrap_license_mock: mock.AsyncMock,
    faker,
    namespace,
    cleanup_handler,
    kopf_runner,
):
    coapi = CustomObjectsApi()
    core = CoreV1Api()
    name = faker.domain_word()
    password1 = faker.password(length=40)
    password2 = faker.password(length=30)
    username1 = faker.user_name()
    username2 = faker.user_name()

    cleanup_handler.append(
        core.delete_persistent_volume(name=f"temp-pv-{namespace.metadata.name}-{name}")
    )
    await asyncio.gather(
        core.create_namespaced_secret(
            namespace=namespace.metadata.name,
            body=V1Secret(
                data={"password": b64encode(password1)},
                metadata=V1ObjectMeta(name=f"user-{name}-1"),
                type="Opaque",
            ),
        ),
        core.create_namespaced_secret(
            namespace=namespace.metadata.name,
            body=V1Secret(
                data={"password": b64encode(password2)},
                metadata=V1ObjectMeta(name=f"user-{name}-2"),
                type="Opaque",
            ),
        ),
    )

    await coapi.create_namespaced_custom_object(
        group=API_GROUP,
        version="v1",
        plural=RESOURCE_CRATEDB,
        namespace=namespace.metadata.name,
        body={
            "apiVersion": "cloud.crate.io/v1",
            "kind": "CrateDB",
            "metadata": {"name": name},
            "spec": {
                "cluster": {
                    "imageRegistry": "crate",
                    "name": "my-crate-cluster",
                    "version": "4.1.5",
                },
                "nodes": {
                    "data": [
                        {
                            "name": "data",
                            "replicas": 1,
                            "resources": {
                                "cpus": 0.5,
                                "memory": "1Gi",
                                "heapRatio": 0.25,
                                "disk": {
                                    "storageClass": "default",
                                    "size": "16GiB",
                                    "count": 1,
                                },
                            },
                        }
                    ]
                },
                "users": [
                    {
                        "name": username1,
                        "password": {
                            "secretKeyRef": {
                                "key": "password",
                                "name": f"user-{name}-1",
                            }
                        },
                    },
                    {
                        "name": username2,
                        "password": {
                            "secretKeyRef": {
                                "key": "password",
                                "name": f"user-{name}-2",
                            }
                        },
                    },
                ],
            },
        },
    )

    host = await asyncio.wait_for(
        get_public_host(core, namespace.metadata.name, name),
        timeout=BACKOFF_TIME * 5,  # It takes a while to retrieve an external IP on AKS.
    )

    password_system = await get_system_user_password(
        namespace.metadata.name, name, core
    )
    await assert_wait_for(
        True,
        does_user_exist,
        host,
        password_system,
        SYSTEM_USERNAME,
        timeout=BACKOFF_TIME * 5,
    )

    await assert_wait_for(
        True, does_user_exist, host, password1, username1, timeout=BACKOFF_TIME * 3,
    )

    await assert_wait_for(
        True, does_user_exist, host, password2, username2, timeout=BACKOFF_TIME * 3,
    )
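get_public_host() is assumed to resolve the externally reachable address of the LoadBalancer service from Example #12 (crate-{name}), waiting until Kubernetes reports an ingress IP or hostname; the callers wrap it in asyncio.wait_for to bound the wait. A sketch under those assumptions:

import asyncio

from kubernetes_asyncio.client import CoreV1Api


async def get_public_host(core: CoreV1Api, namespace: str, name: str) -> str:
    # Assumed: poll the LoadBalancer service until an external address shows
    # up in its status; the caller enforces the overall timeout.
    while True:
        service = await core.read_namespaced_service(
            name=f"crate-{name}", namespace=namespace)
        ingress = service.status.load_balancer.ingress or []
        if ingress:
            return ingress[0].ip or ingress[0].hostname
        await asyncio.sleep(5)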
Example #27
async def test_update_cluster_password(faker, namespace, cleanup_handler,
                                       kopf_runner, api_client):
    coapi = CustomObjectsApi(api_client)
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    password = faker.password(length=40)
    new_password = faker.password(length=40)
    username = faker.user_name()

    cleanup_handler.append(
        core.delete_persistent_volume(
            name=f"temp-pv-{namespace.metadata.name}-{name}"))
    await asyncio.gather(
        core.create_namespaced_secret(
            namespace=namespace.metadata.name,
            body=V1Secret(
                data={"password": b64encode(password)},
                metadata=V1ObjectMeta(name=f"user-{name}",
                                      labels={LABEL_USER_PASSWORD: "******"}),
                type="Opaque",
            ),
        ), )

    await coapi.create_namespaced_custom_object(
        group=API_GROUP,
        version="v1",
        plural=RESOURCE_CRATEDB,
        namespace=namespace.metadata.name,
        body={
            "apiVersion": "cloud.crate.io/v1",
            "kind": "CrateDB",
            "metadata": {
                "name": name
            },
            "spec": {
                "cluster": {
                    "imageRegistry": "crate",
                    "name": "my-crate-cluster",
                    "version": CRATE_VERSION,
                },
                "nodes": {
                    "data": [{
                        "name": "data",
                        "replicas": 1,
                        "resources": {
                            "cpus": 0.5,
                            "memory": "1Gi",
                            "heapRatio": 0.25,
                            "disk": {
                                "storageClass": "default",
                                "size": "16GiB",
                                "count": 1,
                            },
                        },
                    }]
                },
                "users": [
                    {
                        "name": username,
                        "password": {
                            "secretKeyRef": {
                                "key": "password",
                                "name": f"user-{name}",
                            }
                        },
                    },
                ],
            },
        },
    )

    host = await asyncio.wait_for(
        get_public_host(core, namespace.metadata.name, name),
        # It takes a while to retrieve an external IP on AKS.
        timeout=DEFAULT_TIMEOUT * 5,
    )

    await core.patch_namespaced_secret(
        namespace=namespace.metadata.name,
        name=f"user-{name}",
        body=V1Secret(data={"password": b64encode(new_password)}, ),
    )

    await assert_wait_for(
        True,
        is_password_set,
        host,
        new_password,
        username,
        timeout=DEFAULT_TIMEOUT * 5,
    )
Example #28
async def test_modify(
    factory: ComponentFactory,
    mock_kubernetes: MockKubernetesApi,
    caplog: LogCaptureFixture,
) -> None:
    await create_test_service_tokens(mock_kubernetes)
    kubernetes_service = factory.create_kubernetes_service(MagicMock())
    token_service = factory.create_token_service()

    # Valid secret but with a bogus token.
    secret = V1Secret(
        api_version="v1",
        kind="Secret",
        data={"token": "bogus"},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.create_namespaced_secret("mobu", secret)

    # Valid secret but with a nonexistent token.
    secret = V1Secret(
        api_version="v1",
        kind="Secret",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(
            name="gafaelfawr",
            namespace="nublado2",
            labels={
                "foo": "bar",
                "other": "blah",
            },
            annotations={
                "argocd.argoproj.io/compare-options": "IgnoreExtraneous",
                "argocd.argoproj.io/sync-options": "Prune=false",
            },
        ),
        type="Opaque",
    )
    await mock_kubernetes.create_namespaced_secret("nublado2", secret)

    # Update the secrets.  This should replace both with fresh secrets.
    await kubernetes_service.update_service_tokens()
    await assert_kubernetes_secrets_are_correct(factory, mock_kubernetes)

    # Check the logging.
    assert parse_log(caplog) == [
        {
            "event": "Created new service token",
            "key": ANY,
            "severity": "info",
            "token_scope": "admin:token",
            "token_username": "******",
        },
        {
            "event": "Updated mobu/gafaelfawr-secret secret",
            "scopes": ["admin:token"],
            "severity": "info",
            "service": "mobu",
        },
        {
            "event": "Created new service token",
            "key": ANY,
            "severity": "info",
            "token_scope": "",
            "token_username": "******",
        },
        {
            "event": "Updated nublado2/gafaelfawr secret",
            "scopes": [],
            "severity": "info",
            "service": "nublado-hub",
        },
    ]

    # Replace one secret with a valid token for the wrong service.
    async with factory.session.begin():
        token = await token_service.create_token_from_admin_request(
            AdminTokenRequest(
                username="******",
                token_type=TokenType.service,
                scopes=["admin:token"],
            ),
            TokenData.internal_token(),
            ip_address=None,
        )
    secret = V1Secret(
        api_version="v1",
        kind="Secret",
        data={"token": token_as_base64(token)},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.replace_namespaced_secret(
        "gafaelfawr-secret", "mobu", secret
    )

    # Replace the other token with a valid token with the wrong scopes.
    async with factory.session.begin():
        token = await token_service.create_token_from_admin_request(
            AdminTokenRequest(
                username="******",
                token_type=TokenType.service,
                scopes=["read:all"],
            ),
            TokenData.internal_token(),
            ip_address=None,
        )
    secret = V1Secret(
        api_version="v1",
        kind="Secret",
        data={"token": token_as_base64(token)},
        metadata=V1ObjectMeta(name="gafaelfawr", namespace="nublado2"),
        type="Opaque",
    )
    await mock_kubernetes.replace_namespaced_secret(
        "gafaelfawr", "nublado2", secret
    )

    # Update the secrets.  This should create new tokens for both.
    await kubernetes_service.update_service_tokens()
    await assert_kubernetes_secrets_are_correct(factory, mock_kubernetes)
    nublado_secret = await mock_kubernetes.read_namespaced_secret(
        "gafaelfawr", "nublado2"
    )

    # Finally, replace a secret with one with no token.
    secret = V1Secret(
        api_version="v1",
        data={},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.replace_namespaced_secret(
        "gafaelfawr-secret", "mobu", secret
    )

    # Update the secrets.  This should create a new token for the first secret
    # but not for the second.
    await kubernetes_service.update_service_tokens()
    await assert_kubernetes_secrets_are_correct(
        factory, mock_kubernetes, is_fresh=False
    )
    assert nublado_secret == await mock_kubernetes.read_namespaced_secret(
        "gafaelfawr", "nublado2"
    )
Example #29
async def test_errors_replace_read(
    factory: ComponentFactory, mock_kubernetes: MockKubernetesApi
) -> None:
    await create_test_service_tokens(mock_kubernetes)
    kubernetes_service = factory.create_kubernetes_service(MagicMock())
    token_service = factory.create_token_service()

    # Create a secret that should exist but has an invalid token.
    secret = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.create_namespaced_secret("mobu", secret)

    # Simulate some errors.  The callback function takes the operation and the
    # secret name.
    def error_callback_replace(method: str, *args: Any) -> None:
        if method == "replace_namespaced_secret":
            raise ApiException(status=500, reason="Some error")

    mock_kubernetes.error_callback = error_callback_replace

    # Now run the synchronization.  The secret should be left unchanged, but
    # we should still create the missing nublado2 secret.
    await kubernetes_service.update_service_tokens()
    objects = mock_kubernetes.get_all_objects_for_test("Secret")
    assert secret in objects
    good_secret = await mock_kubernetes.read_namespaced_secret(
        "gafaelfawr", "nublado2"
    )
    assert await token_data_from_secret(token_service, good_secret)

    # We should have also updated the status of the parent custom object.
    service_token = await mock_kubernetes.get_namespaced_custom_object(
        "gafaelfawr.lsst.io",
        "v1alpha1",
        "mobu",
        "gafaelfawrservicetokens",
        "gafaelfawr-secret",
    )
    assert service_token["status"]["conditions"] == [
        {
            "lastTransitionTime": ANY,
            "message": "Kubernetes API error: (500)\nReason: Some error\n",
            "observedGeneration": 1,
            "reason": StatusReason.Failed.value,
            "status": "False",
            "type": "SecretCreated",
        }
    ]

    # Try again, but simulating an error in retrieving a secret.
    def error_callback_read(method: str, *args: Any) -> None:
        if method == "read_namespaced_secret":
            raise ApiException(status=500, reason="Some error")

    mock_kubernetes.error_callback = error_callback_read

    # Now run the synchronization.  As before, the secret should be left
    # unchanged, and the good secret should also be left unchanged.
    await kubernetes_service.update_service_tokens()
    objects = mock_kubernetes.get_all_objects_for_test("Secret")
    assert secret in objects
Example #30
async def test_bootstrap_license(
    bootstrap_system_user: mock.AsyncMock,
    bootstrap_license_mock: mock.AsyncMock,
    faker,
    namespace,
    cleanup_handler,
    kopf_runner,
    api_client,
):
    coapi = CustomObjectsApi(api_client)
    core = CoreV1Api(api_client)
    name = faker.domain_word()
    license = base64.b64encode(faker.binary(64)).decode()

    cleanup_handler.append(
        core.delete_persistent_volume(
            name=f"temp-pv-{namespace.metadata.name}-{name}"), )
    await core.create_namespaced_secret(
        namespace=namespace.metadata.name,
        body=V1Secret(
            data={"license": b64encode(license)},
            metadata=V1ObjectMeta(name=f"license-{name}"),
            type="Opaque",
        ),
    )
    await coapi.create_namespaced_custom_object(
        group=API_GROUP,
        version="v1",
        plural=RESOURCE_CRATEDB,
        namespace=namespace.metadata.name,
        body={
            "apiVersion": "cloud.crate.io/v1",
            "kind": "CrateDB",
            "metadata": {
                "name": name
            },
            "spec": {
                "cluster": {
                    "imageRegistry": "crate",
                    "license": {
                        "secretKeyRef": {
                            "key": "license",
                            "name": f"license-{name}"
                        },
                    },
                    "name": "my-crate-cluster",
                    "version": CRATE_VERSION,
                },
                "nodes": {
                    "data": [{
                        "name": "data",
                        "replicas": 1,
                        "resources": {
                            "cpus": 0.5,
                            "memory": "1Gi",
                            "heapRatio": 0.25,
                            "disk": {
                                "storageClass": "default",
                                "size": "16GiB",
                                "count": 1,
                            },
                        },
                    }]
                },
            },
        },
    )
    await assert_wait_for(
        True,
        was_license_set,
        bootstrap_license_mock,
        mock.ANY,
        namespace.metadata.name,
        f"crate-data-data-{name}-0",
        False,
        {"secretKeyRef": {
            "key": "license",
            "name": f"license-{name}"
        }},
        timeout=DEFAULT_TIMEOUT * 3,
    )
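was_license_set() is a predicate passed to assert_wait_for; given the arguments above, it presumably checks whether the mocked bootstrap-license coroutine has already been awaited with the expected arguments. A sketch, assuming the handler awaits it positionally:

from unittest import mock


async def was_license_set(mocked: mock.AsyncMock, *expected_args) -> bool:
    # Assumed predicate: true once the patched bootstrap-license coroutine has
    # been awaited with the expected arguments (mock.ANY matches anything).
    return mock.call(*expected_args) in mocked.await_args_list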