def get_statefulset_init_containers(crate_image: str) -> List[V1Container]:
    """Build the init containers that prepare a node for running CrateDB.

    Three steps run before the main container starts: raise
    ``vm.max_map_count``, fetch the JMX exporter jar, and create the
    heap-dump directory with the right ownership.

    :param crate_image: CrateDB image used for the chown step so the
        ``crate`` user/group exists inside the container.
    """
    # We need to do this in an init container because of the required
    # security context. We don't want to run CrateDB with that context,
    # thus doing it before.
    init_sysctl = V1Container(
        command=["sysctl", "-w", "vm.max_map_count=262144"],
        image="busybox",
        name="init-sysctl",
        security_context=V1SecurityContext(privileged=True),
    )
    # Download the JMX exporter jar into the shared "jmxdir" volume so the
    # CrateDB container can load it as a Java agent.
    fetch_jmx_exporter = V1Container(
        command=[
            "wget",
            "-O",
            f"/jmxdir/crate-jmx-exporter-{config.JMX_EXPORTER_VERSION}.jar",
            f"https://dl.bintray.com/crate/crate/io/crate/crate-jmx-exporter/{config.JMX_EXPORTER_VERSION}/crate-jmx-exporter-{config.JMX_EXPORTER_VERSION}.jar",  # noqa
        ],
        image="busybox",
        name="fetch-jmx-exporter",
        volume_mounts=[V1VolumeMount(name="jmxdir", mount_path="/jmxdir")],
    )
    # Pre-create the heap-dump target and hand it to the crate user. Uses
    # the CrateDB image so the "crate" user is known to chown.
    mkdir_heapdump = V1Container(
        command=[
            "sh",
            "-c",
            "mkdir -pv /resource/heapdump ; chown -R crate:crate /resource",
        ],
        image=crate_image,
        name="mkdir-heapdump",
        volume_mounts=[V1VolumeMount(name="resource", mount_path="/resource")],
    )
    return [init_sysctl, fetch_jmx_exporter, mkdir_heapdump]
def get_backup_metrics_exporter(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    http_port: int,
    prometheus_port: int,
    backup_aws: Dict[str, Any],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    has_ssl: bool,
) -> V1Deployment:
    """Build the single-replica Deployment that exports backup metrics.

    The exporter container serves Prometheus metrics on
    ``prometheus_port`` and is annotated so Prometheus discovers it.

    :param owner_references: owner refs so the Deployment is garbage
        collected with its cluster.
    :param name: cluster name, used in resource names and selectors.
    :param labels: labels applied to the Deployment and its pods.
    :param http_port: CrateDB HTTP port, passed through to the shared
        backup environment.
    :param prometheus_port: port the exporter listens on for scrapes.
    :param backup_aws: AWS backup configuration for the shared env vars.
    :param image_pull_secrets: secrets for pulling the exporter image.
    :param has_ssl: whether to talk to CrateDB over HTTPS.
    """
    # Exporter-specific settings first, then the shared backup connection
    # environment (credentials, host, port, ...).
    env = [
        V1EnvVar(name="EXPORTER_PORT", value=str(prometheus_port)),
        V1EnvVar(
            name="PYTHONWARNINGS", value="ignore:Unverified HTTPS request"
        ),
        V1EnvVar(name="REPOSITORY_PREFIX", value="system_backup"),
    ]
    env = env + get_backup_env(name, http_port, backup_aws, has_ssl)

    exporter_container = V1Container(
        command=["metrics-exporter", "-vv"],
        env=env,
        image=config.CLUSTER_BACKUP_IMAGE,
        name="metrics-exporter",
        ports=[
            V1ContainerPort(
                container_port=prometheus_port,
                name="backup-metrics",
            )
        ],
    )

    # Annotations let Prometheus find and scrape the pod.
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            annotations={
                "prometheus.io/port": str(prometheus_port),
                "prometheus.io/scrape": "true",
            },
            labels=labels,
            name=f"backup-metrics-{name}",
        ),
        spec=V1PodSpec(
            containers=[exporter_container],
            image_pull_secrets=image_pull_secrets,
            restart_policy="Always",
        ),
    )

    return V1Deployment(
        metadata=V1ObjectMeta(
            name=f"backup-metrics-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(
                match_labels={LABEL_COMPONENT: "backup", LABEL_NAME: name}
            ),
            template=pod_template,
        ),
    )
def get_statefulset_containers(
    node_spec: Dict[str, Any],
    http_port: int,
    jmx_port: int,
    postgres_port: int,
    prometheus_port: int,
    transport_port: int,
    crate_image: str,
    crate_command: List[str],
    crate_env: List[V1EnvVar],
    crate_volume_mounts: List[V1VolumeMount],
) -> List[V1Container]:
    """Build the main containers of a CrateDB StatefulSet pod.

    Returns the SQL exporter sidecar and the CrateDB container itself,
    with CPU/memory requests and limits taken from ``node_spec``.
    """
    # There is no official release of 0.6, so let's use our own build
    # from commit 1498107. Also, because it's a private registry, let's use the
    # official release during tests so we don't need Docker secrets.
    # https://github.com/free/sql_exporter/commit/1498107
    sql_exporter_image = (
        "githubfree/sql_exporter:latest"
        if config.TESTING
        else "cloud.registry.cr8.net/crate/sql-exporter:1498107"
    )

    sql_exporter_container = V1Container(
        command=[
            "/bin/sql_exporter",
            "-config.file=/config/sql-exporter.yaml",
            "-web.listen-address=:9399",
            "-web.metrics-path=/metrics",
        ],
        image=sql_exporter_image,
        name="sql-exporter",
        ports=[V1ContainerPort(container_port=9399, name="sql-exporter")],
        volume_mounts=[
            V1VolumeMount(
                mount_path="/config",
                name="crate-sql-exporter",
                read_only=True,
            ),
        ],
    )

    # Requests and limits are identical so the pod gets the "Guaranteed"
    # QoS treatment for both CPU and memory.
    cpu_quantity = str(node_spec["resources"]["cpus"])
    memory_quantity = format_bitmath(
        bitmath.parse_string_unsafe(node_spec["resources"]["memory"])
    )

    crate_container = V1Container(
        command=crate_command,
        env=crate_env,
        image=crate_image,
        name="crate",
        ports=[
            V1ContainerPort(container_port=http_port, name="http"),
            V1ContainerPort(container_port=jmx_port, name="jmx"),
            V1ContainerPort(container_port=postgres_port, name="postgres"),
            V1ContainerPort(container_port=prometheus_port, name="prometheus"),
            V1ContainerPort(container_port=transport_port, name="transport"),
        ],
        readiness_probe=V1Probe(
            http_get=V1HTTPGetAction(path="/ready", port=prometheus_port),
            initial_delay_seconds=30,
            period_seconds=10,
        ),
        resources=V1ResourceRequirements(
            limits={"cpu": cpu_quantity, "memory": memory_quantity},
            requests={"cpu": cpu_quantity, "memory": memory_quantity},
        ),
        volume_mounts=crate_volume_mounts,
    )

    return [sql_exporter_container, crate_container]
def get_backup_cronjob(
    owner_references: Optional[List[V1OwnerReference]],
    name: str,
    labels: LabelType,
    http_port: int,
    backup_aws: Dict[str, Any],
    image_pull_secrets: Optional[List[V1LocalObjectReference]],
    has_ssl: bool,
) -> V1beta1CronJob:
    """Build the CronJob that periodically snapshots a CrateDB cluster.

    :param owner_references: owner refs so the CronJob is garbage
        collected with its cluster.
    :param name: cluster name, used in resource names and ``CLUSTER_ID``.
    :param labels: labels applied to the CronJob and its pods.
    :param http_port: CrateDB HTTP port for the shared backup env.
    :param backup_aws: AWS backup configuration; also supplies the
        ``cron`` schedule and the secret references for the credentials.
    :param image_pull_secrets: secrets for pulling the backup image.
    :param has_ssl: whether to talk to CrateDB over HTTPS.
    """

    def _aws_credential(env_name: str, config_key: str) -> V1EnvVar:
        # Resolve an AWS credential from the Kubernetes secret referenced
        # in the backup configuration.
        secret_ref = backup_aws[config_key]["secretKeyRef"]
        return V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                secret_key_ref=V1SecretKeySelector(
                    key=secret_ref["key"],
                    name=secret_ref["name"],
                ),
            ),
        )

    env = [
        _aws_credential("AWS_ACCESS_KEY_ID", "accessKeyId"),
        _aws_credential("AWS_SECRET_ACCESS_KEY", "secretAccessKey"),
        V1EnvVar(name="CLUSTER_ID", value=name),
        V1EnvVar(
            name="PYTHONWARNINGS", value="ignore:Unverified HTTPS request"
        ),
        V1EnvVar(name="REPOSITORY_PREFIX", value="system_backup"),
    ] + get_backup_env(name, http_port, backup_aws, has_ssl)

    backup_pod = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels=labels,
            name=f"create-snapshot-{name}",
        ),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    command=["backup", "-vv"],
                    env=env,
                    image=config.CLUSTER_BACKUP_IMAGE,
                    name="backup",
                )
            ],
            image_pull_secrets=image_pull_secrets,
            # A failed snapshot run is not retried in place; the next
            # scheduled run takes over.
            restart_policy="Never",
        ),
    )

    return V1beta1CronJob(
        metadata=V1ObjectMeta(
            name=f"create-snapshot-{name}",
            labels=labels,
            owner_references=owner_references,
        ),
        spec=V1beta1CronJobSpec(
            # Never run two snapshot jobs at the same time.
            concurrency_policy="Forbid",
            failed_jobs_history_limit=1,
            job_template=V1beta1JobTemplateSpec(
                metadata=V1ObjectMeta(
                    labels=labels, name=f"create-snapshot-{name}"
                ),
                spec=V1JobSpec(template=backup_pod),
            ),
            schedule=backup_aws["cron"],
            successful_jobs_history_limit=1,
        ),
    )
async def test_create_kubernetes_resources(
    kubernetes_api_mock: KubernetesApiMock,
) -> None:
    """End-to-end check of the Kubernetes objects created for a user lab.

    Builds a fully mocked spawner and selected options, runs
    ``ResourceManager._create_kubernetes_resources``, and compares every
    object recorded by the API mock against the exact expected manifests
    (three ConfigMaps plus one VaultSecret custom resource).
    """
    # Mocked KubeSpawner configured the way JupyterHub would configure it.
    spawner = Mock(spec=KubeSpawner)
    spawner.k8s_api_request_timeout = 3
    spawner.k8s_api_request_retry_timeout = 30
    spawner.namespace = "nublado2-someuser"
    spawner.extra_annotations = {
        "argocd.argoproj.io/compare-options": "IgnoreExtraneous",
        "argocd.argoproj.io/sync-options": "Prune=false",
    }
    spawner.extra_labels = {
        "hub.jupyter.org/network-access-hub": "true",
        "argocd.argoproj.io/instance": "nublado-users",
    }
    # Route resource creation through the mock so every created object is
    # recorded for the assertions below.
    spawner._make_create_resource_request = kubernetes_api_mock.create_object
    spawner.hub = Mock()
    spawner.hub.base_url = "/nb/hub/"
    spawner.user = Mock(spec=User)
    spawner.user.name = "someuser"
    spawner.api = kubernetes_api_mock
    # Auth state as the authenticator would provide it: token, uid, groups.
    auth_state = {
        "token": "user-token",
        "uid": 1234,
        "groups": [{
            "name": "foo",
            "id": 1235
        }, {
            "name": "bar",
            "id": 4567
        }],
    }
    # Minimal user pod manifest that the spawner would generate.
    pod_manifest = V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=V1ObjectMeta(
            name="user-pod",
            namespace=spawner.namespace,
        ),
        spec=V1PodSpec(containers=[
            V1Container(
                name="container",
                command=["run-something"],
                env=[V1EnvVar(name="FOO", value="BAR")],
                image="blah:latest",
            )
        ], ),
    )
    if sys.version_info < (3, 8):
        # Python < 3.8 has no async-aware Mock, so awaited results must be
        # wrapped in pre-resolved Futures.  NOTE(review): drop this branch
        # once Python 3.7 support is retired.
        spawner.get_pod_manifest.return_value = asyncio.Future()
        spawner.get_pod_manifest.return_value.set_result(pod_manifest)
        spawner.user.get_auth_state.return_value = asyncio.Future()
        spawner.user.get_auth_state.return_value.set_result(auth_state)
    else:
        spawner.get_pod_manifest.return_value = pod_manifest
        spawner.user.get_auth_state.return_value = auth_state
    # Options the user selected in the spawn form.
    options = Mock(spec=SelectedOptions)
    options.debug = "true"
    options.clear_dotlocal = "true"
    options.image_info = ImageInfo(
        reference="registry.hub.docker.com/lsstsqre/sciplat-lab:w_2021_13",
        display_name="blah blah blah",
        digest="sha256:123456789abcdef",
    )
    resource_manager = ResourceManager()
    await resource_manager._create_kubernetes_resources(spawner, options)
    # Standard Kubernetes objects, sorted for a deterministic comparison.
    assert sorted(
        kubernetes_api_mock.objects,
        key=lambda o: (o["kind"], o["metadata"]["name"]),
    ) == [
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "dask",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                # Expected dask worker template: the user pod manifest
                # rendered as YAML with a DASK_WORKER env var added.
                "dask_worker.yml": f"""\
apiVersion: v1
kind: Pod
metadata:
  namespace: {spawner.namespace}
spec:
  containers:
  - command:
    - run-something
    env:
    - name: FOO
      value: BAR
    - name: DASK_WORKER
      value: 'TRUE'
    image: blah:latest
    name: container
"""
            },
        },
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "group",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                # /etc/group-style entries: the user's primary group plus
                # one line per auth-state group with the user as a member.
                "group": ("someuser:x:1234:\n"
                          "foo:x:1235:someuser\n"
                          "bar:x:4567:someuser\n")
            },
        },
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "lab-environment",
                "namespace": spawner.namespace,
                "annotations": spawner.extra_annotations,
                "labels": spawner.extra_labels,
            },
            "data": {
                "EXTERNAL_INSTANCE_URL": "https://data.example.com/",
                "FIREFLY_ROUTE": "/portal/app",
                "HUB_ROUTE": "/nb/hub/",
                "EXTERNAL_GROUPS": "foo:1235,bar:4567",
                "EXTERNAL_UID": "1234",
                "ACCESS_TOKEN": "user-token",
                "IMAGE_DIGEST": "sha256:123456789abcdef",
                "IMAGE_DESCRIPTION": "blah blah blah",
                "CLEAR_DOTLOCAL": "true",
                "DEBUG": "true",
            },
        },
    ]
    # Custom resources (the VaultSecret) are recorded separately from the
    # built-in object kinds.
    assert sorted(
        kubernetes_api_mock.custom,
        key=lambda o: (o["kind"], o["metadata"]["name"]),
    ) == [{
        "apiVersion": "ricoberger.de/v1alpha1",
        "kind": "VaultSecret",
        "metadata": {
            "name": "butler-secret",
            "namespace": spawner.namespace,
            "annotations": spawner.extra_annotations,
            "labels": spawner.extra_labels,
        },
        "spec": {
            "path": "k8s_operator/data/butler",
            "type": "Opaque",
        },
    }]