Example #1
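All of the examples in this section read like excerpts from the Dagster Helm chart's schema test suite and omit the module-level imports they share. Below is a hedged sketch of those imports; the schema.* module paths are assumptions about that package layout and may differ between chart versions.

import base64

import yaml

# NOTE: the module paths below are assumptions about the Dagster Helm schema
# package layout; verify them against the chart repository version in use.
from schema.charts.dagster.subschema.redis import Redis
from schema.charts.dagster.subschema.run_launcher import (
    CeleryK8sRunLauncherConfig,
    CeleryWorkerQueue,
    K8sRunLauncherConfig,
    RunLauncher,
    RunLauncherConfig,
    RunLauncherType,
)
from schema.charts.dagster.values import DagsterHelmValues
from schema.charts.utils import kubernetes
from schema.utils.helm_template import HelmTemplate
# remove_none_recursively (used when comparing rendered volume specs) is a small
# test helper; its import path here is also an assumption.
from schema.charts.utils.utils import remove_none_recursively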
def test_celery_backend_with_redis_with_password(template: HelmTemplate):
    redis_password = "******"
    redis_host = "host"
    redis_port = 6379
    broker_db_number = 20
    backend_db_number = 21
    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            usePassword=True,
            password=redis_password,
            host=redis_host,
            port=redis_port,
            brokerDbNumber=broker_db_number,
            backendDbNumber=backend_db_number,
        ),
    )

    [secret] = template.render(helm_values)

    expected_celery_broker = (
        f"redis://:{redis_password}@{redis_host}:{redis_port}/{broker_db_number}"
    )
    expected_celery_backend = (
        f"redis://:{redis_password}@{redis_host}:{redis_port}/{backend_db_number}"
    )

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == base64.b64encode(
        bytes(expected_celery_broker, encoding="utf-8")
    ).decode("utf-8")
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == base64.b64encode(
        bytes(expected_celery_backend, encoding="utf-8")
    ).decode("utf-8")
Example #2
def test_celery_secret_renders(template: HelmTemplate):
    helm_values_generate_celery_secret_enabled = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
    )

    secrets = template.render(helm_values_generate_celery_secret_enabled)

    assert len(secrets) == 1
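
The template and deployment_template arguments in these tests are pytest fixtures that render a specific chart template into Kubernetes client model objects, as Example #2 above shows in its simplest form. A minimal sketch of how such a fixture might be wired up follows; the HelmTemplate constructor arguments, file paths, and target model are assumptions about the test utilities, not a confirmed API.

import pytest
from kubernetes.client import models


@pytest.fixture(name="template")
def helm_template_fixture() -> HelmTemplate:
    # Hypothetical wiring: render the celery config Secret template and
    # deserialize the output into kubernetes client model objects.
    return HelmTemplate(
        helm_dir_path="helm/dagster",  # assumed chart location
        subchart_paths=["charts/dagster-user-deployments"],  # assumed subcharts
        output="templates/secret-celery-config.yaml",  # assumed template under test
        model=models.V1Secret,  # assumed target model
    )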
Example #3
def test_celery_backend_override_only_one(template: HelmTemplate):
    custom_url = "host:6380,password=password,ssl=True"
    default_url = "redis://myhost:6379/0"

    # Override broker, not backend
    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            brokerUrl=custom_url,
            host="myhost",
        ),
    )

    [secret] = template.render(helm_values)

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == base64.b64encode(
        bytes(custom_url, encoding="utf-8")
    ).decode("utf-8")
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == base64.b64encode(
        bytes(default_url, encoding="utf-8")
    ).decode("utf-8")

    # Override backend, not broker
    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            backendUrl=custom_url,
            host="myhost",
        ),
    )

    [secret] = template.render(helm_values)

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == base64.b64encode(
        bytes(default_url, encoding="utf-8")
    ).decode("utf-8")
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == base64.b64encode(
        bytes(custom_url, encoding="utf-8")
    ).decode("utf-8")
Example #4
def test_celery_queue_default_image_tag_is_chart_version(
        deployment_template: HelmTemplate, chart_version: str):
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY))

    celery_queue_deployments = deployment_template.render(
        helm_values, chart_version=chart_version)

    assert len(celery_queue_deployments) == 1

    image = celery_queue_deployments[0].spec.template.spec.containers[0].image
    _, image_tag = image.split(":")

    assert image_tag == chart_version
Example #5
def test_celery_queue_empty_run_launcher_config_source(
        deployment_template: HelmTemplate,
        celery_queue_configmap_template: HelmTemplate):
    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2,
            "configSource": {
                "worker_concurrency": 3
            }
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1,
            "configSource": {
                "worker_concurrency": 4
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    workerQueues=[
                        CeleryWorkerQueue(**workerQueue)
                        for workerQueue in workerQueues
                    ], )),
        ))

    celery_queue_deployments = deployment_template.render(helm_values)

    celery_queue_configmaps = celery_queue_configmap_template.render(
        helm_values)

    assert len(celery_queue_deployments) == 2

    assert len(celery_queue_configmaps) == 2

    dagster_celery = yaml.full_load(
        celery_queue_configmaps[0].data["celery.yaml"])
    extra_queue_celery = yaml.full_load(
        celery_queue_configmaps[1].data["celery.yaml"])

    assert dagster_celery["execution"]["celery"][
        "config_source"] == workerQueues[0]["configSource"]

    assert (extra_queue_celery["execution"]["celery"]["config_source"] ==
            workerQueues[1]["configSource"])
Example #6
def test_celery_queue_image(deployment_template: HelmTemplate):
    repository = "repository"
    tag = "tag"
    helm_values = DagsterHelmValues.construct(runLauncher=RunLauncher(
        type=RunLauncherType.CELERY,
        config=RunLauncherConfig(
            celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                image=kubernetes.Image.construct(repository=repository,
                                                 tag=tag))),
    ))

    celery_queue_deployments = deployment_template.render(helm_values)

    assert len(celery_queue_deployments) == 1

    image = celery_queue_deployments[0].spec.template.spec.containers[0].image
    image_name, image_tag = image.split(":")

    assert image_name == repository
    assert image_tag == tag
Example #7
def test_default_redis_config(template: HelmTemplate):
    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            host="myhost",
        ),
    )
    [secret] = template.render(helm_values)

    expected_celery_broker = "redis://myhost:6379/0"
    expected_celery_backend = "redis://myhost:6379/0"

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == base64.b64encode(
        bytes(expected_celery_broker, encoding="utf-8")
    ).decode("utf-8")
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == base64.b64encode(
        bytes(expected_celery_backend, encoding="utf-8")
    ).decode("utf-8")
Example #8
def test_celery_backend_override_connection_string(template: HelmTemplate):
    broker_url = "host:6380,password=password,ssl=True"
    backend_url = "host:6381,password=password,ssl=True"
    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            brokerUrl=broker_url,
            backendUrl=backend_url,
        ),
    )

    [secret] = template.render(helm_values)

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == base64.b64encode(
        bytes(broker_url, encoding="utf-8")
    ).decode("utf-8")
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == base64.b64encode(
        bytes(backend_url, encoding="utf-8")
    ).decode("utf-8")
Example #9
def test_k8s_run_launcher_fail_pod_on_run_failure(template: HelmTemplate):
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    imagePullPolicy="Always",
                    loadInclusterConfig=True,
                    envConfigMaps=[],
                    envSecrets=[],
                    envVars=[],
                    volumeMounts=[],
                    volumes=[],
                    failPodOnRunFailure=True,
                )),
        ))
    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["config"]["fail_pod_on_run_failure"]
Example #10
def test_k8s_run_launcher_config(template: HelmTemplate):
    job_namespace = "namespace"
    load_incluster_config = True
    env_config_maps = [{"name": "env_config_map"}]
    env_secrets = [{"name": "secret"}]
    env_vars = ["ENV_VAR"]
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    jobNamespace=job_namespace,
                    loadInclusterConfig=load_incluster_config,
                    envConfigMaps=env_config_maps,
                    envSecrets=env_secrets,
                    envVars=env_vars,
                )
            ),
        )
    )

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_k8s"
    assert run_launcher_config["class"] == "K8sRunLauncher"
    assert run_launcher_config["config"]["job_namespace"] == job_namespace
    assert run_launcher_config["config"]["load_incluster_config"] == load_incluster_config
    assert run_launcher_config["config"]["env_config_maps"][1:] == [
        configmap["name"] for configmap in env_config_maps
    ]
    assert run_launcher_config["config"]["env_secrets"] == [
        secret["name"] for secret in env_secrets
    ]
    assert run_launcher_config["config"]["env_vars"] == env_vars
Example #11
def test_celery_queue_inherit_config_source(
        deployment_template: HelmTemplate,
        celery_queue_configmap_template: HelmTemplate):

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2,
            "additionalCeleryArgs": ["-E", "--concurrency", "16"],
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1,
            "configSource": {
                "worker_concurrency": 4
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    configSource=configSource,
                    workerQueues=[
                        CeleryWorkerQueue(**workerQueue)
                        for workerQueue in workerQueues
                    ],
                )),
        ))

    celery_queue_deployments = deployment_template.render(helm_values)

    celery_queue_configmaps = celery_queue_configmap_template.render(
        helm_values)

    assert len(celery_queue_deployments) == 2

    assert len(celery_queue_configmaps) == 2

    dagster_container_spec = celery_queue_deployments[
        0].spec.template.spec.containers[0]
    assert dagster_container_spec.command == ["dagster-celery"]
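    # Everything after "--" is the queue's additionalCeleryArgs, appended
    # verbatim to the dagster-celery worker invocation.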
    assert dagster_container_spec.args == [
        "worker",
        "start",
        "-A",
        "dagster_celery_k8s.app",
        "-y",
        "/opt/dagster/dagster_home/celery-config.yaml",
        "-q",
        "dagster",
        "--",
        "-E",
        "--concurrency",
        "16",
    ]

    liveness_command = [
        "/bin/sh",
        "-c",
        'dagster-celery status -A dagster_celery_k8s.app -y /opt/dagster/dagster_home/celery-config.yaml | grep "${HOSTNAME}:.*OK"',
    ]

    assert (dagster_container_spec.liveness_probe._exec.command  # pylint: disable=protected-access
            == liveness_command)

    extra_queue_container_spec = celery_queue_deployments[
        1].spec.template.spec.containers[0]
    assert extra_queue_container_spec.command == ["dagster-celery"]
    assert extra_queue_container_spec.args == [
        "worker",
        "start",
        "-A",
        "dagster_celery_k8s.app",
        "-y",
        "/opt/dagster/dagster_home/celery-config.yaml",
        "-q",
        "extra-queue-1",
    ]

    assert (extra_queue_container_spec.liveness_probe._exec.command  # pylint: disable=protected-access
            == liveness_command)

    dagster_celery = yaml.full_load(
        celery_queue_configmaps[0].data["celery.yaml"])
    extra_queue_celery = yaml.full_load(
        celery_queue_configmaps[1].data["celery.yaml"])

    assert dagster_celery["execution"]["celery"]["broker"][
        "env"] == "DAGSTER_CELERY_BROKER_URL"
    assert dagster_celery["execution"]["celery"]["backend"][
        "env"] == "DAGSTER_CELERY_BACKEND_URL"

    assert dagster_celery["execution"]["celery"][
        "config_source"] == configSource

    assert extra_queue_celery["execution"]["celery"]["config_source"] == {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 4,
    }

    assert extra_queue_celery["execution"]["celery"]["broker"][
        "env"] == "DAGSTER_CELERY_BROKER_URL"
    assert (extra_queue_celery["execution"]["celery"]["backend"]["env"] ==
            "DAGSTER_CELERY_BACKEND_URL")
Example #12
def test_celery_queue_volumes(deployment_template: HelmTemplate):
    volume_mounts = [
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    repository = "repository"
    tag = "tag"

    helm_values = DagsterHelmValues.construct(runLauncher=RunLauncher(
        type=RunLauncherType.CELERY,
        config=RunLauncherConfig(
            celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                image=kubernetes.Image.construct(repository=repository,
                                                 tag=tag),
                volumeMounts=volume_mounts,
                volumes=volumes,
            )),
    ))

    celery_queue_deployments = deployment_template.render(helm_values)

    assert len(celery_queue_deployments) == 1

    mounts = celery_queue_deployments[0].spec.template.spec.containers[
        0].volume_mounts

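    # The first two mounts (dagster-instance and dagster-celery) are supplied by
    # the chart; the user-defined test-volume mount is appended after them.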
    assert [remove_none_recursively(mount.to_dict()) for mount in mounts] == [
        {
            "mount_path": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "sub_path": "dagster.yaml",
        },
        {
            "mount_path": "/opt/dagster/dagster_home/celery-config.yaml",
            "name": "dagster-celery",
            "sub_path": "celery.yaml",
        },
        {
            "mount_path":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "name": "test-volume",
            "sub_path": "volume_mounted_file.yaml",
        },
    ]

    rendered_volumes = celery_queue_deployments[0].spec.template.spec.volumes

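    # Likewise, the chart-provided instance and celery ConfigMap volumes come
    # before the user-supplied volumes.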
    assert [
        remove_none_recursively(volume.to_dict())
        for volume in rendered_volumes
    ] == [
        {
            "config_map": {
                "name": "release-name-dagster-instance"
            },
            "name": "dagster-instance"
        },
        {
            "config_map": {
                "name": "release-name-dagster-celery-dagster"
            },
            "name": "dagster-celery"
        },
        {
            "name": "test-volume",
            "config_map": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistent_volume_claim": {
                "claim_name": "my_claim",
                "read_only": False
            },
        },
    ]
Example #13
def test_k8s_run_launcher_config(template: HelmTemplate):
    job_namespace = "namespace"
    image_pull_policy = "Always"
    load_incluster_config = True
    env_config_maps = [{"name": "env_config_map"}]
    env_secrets = [{"name": "secret"}]
    env_vars = ["ENV_VAR"]
    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    jobNamespace=job_namespace,
                    imagePullPolicy=image_pull_policy,
                    loadInclusterConfig=load_incluster_config,
                    envConfigMaps=env_config_maps,
                    envSecrets=env_secrets,
                    envVars=env_vars,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_k8s"
    assert run_launcher_config["class"] == "K8sRunLauncher"
    assert run_launcher_config["config"]["job_namespace"] == job_namespace
    assert run_launcher_config["config"][
        "load_incluster_config"] == load_incluster_config
    assert run_launcher_config["config"][
        "image_pull_policy"] == image_pull_policy
    assert run_launcher_config["config"]["env_config_maps"][1:] == [
        configmap["name"] for configmap in env_config_maps
    ]
    assert run_launcher_config["config"]["env_secrets"] == [
        secret["name"] for secret in env_secrets
    ]
    assert run_launcher_config["config"]["env_vars"] == env_vars
    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert not "fail_pod_on_run_failure" in run_launcher_config["config"]
Example #14
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
    image = {
        "repository": "test_repo",
        "tag": "test_tag",
        "pullPolicy": "Always"
    }

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1
        },
    ]

    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    image_pull_secrets = [{"name": "IMAGE_PULL_SECRET"}]

    helm_values = DagsterHelmValues.construct(
        imagePullSecrets=image_pull_secrets,
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ),
    )

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_celery_k8s"
    assert run_launcher_config["class"] == "CeleryK8sRunLauncher"

    assert run_launcher_config["config"]["config_source"] == configSource

    assert run_launcher_config["config"]["broker"] == {
        "env": "DAGSTER_CELERY_BROKER_URL"
    }

    assert run_launcher_config["config"]["backend"] == {
        "env": "DAGSTER_CELERY_BACKEND_URL"
    }

    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert run_launcher_config["config"][
        "image_pull_secrets"] == image_pull_secrets

    assert run_launcher_config["config"]["image_pull_policy"] == "Always"

    assert run_launcher_config["config"][
        "service_account_name"] == "release-name-dagster"

    assert not "fail_pod_on_run_failure" in run_launcher_config["config"]

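    # An explicit imagePullPolicy on the run launcher config overrides the
    # pullPolicy ("Always") carried by the image above.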
    helm_values_with_image_pull_policy = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    imagePullPolicy="IfNotPresent",
                )),
        ), )

    configmaps = template.render(helm_values_with_image_pull_policy)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["image_pull_policy"] == "IfNotPresent"

    helm_values_with_fail_pod_on_run_failure = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    failPodOnRunFailure=True,
                )),
        ), )

    configmaps = template.render(helm_values_with_fail_pod_on_run_failure)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["fail_pod_on_run_failure"]
Example #15
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
    image = {
        "repository": "test_repo",
        "tag": "test_tag",
        "pullPolicy": "Always"
    }

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1
        },
    ]

    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                )),
        ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_celery_k8s"
    assert run_launcher_config["class"] == "CeleryK8sRunLauncher"

    assert run_launcher_config["config"]["config_source"] == configSource

    assert run_launcher_config["config"]["broker"] == {
        "env": "DAGSTER_CELERY_BROKER_URL"
    }

    assert run_launcher_config["config"]["backend"] == {
        "env": "DAGSTER_CELERY_BACKEND_URL"
    }

    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes