Example #1
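All of the examples on this page rely on a common set of imports from the Dagster Helm chart's schema test suite. The module paths below are a best guess based on the identifiers used in the tests and may differ slightly in the actual repository:

import yaml

from schema.charts.dagster.subschema.run_launcher import (
    CeleryK8sRunLauncherConfig,
    CeleryWorkerQueue,
    K8sRunLauncherConfig,
    RunLauncher,
    RunLauncherConfig,
    RunLauncherType,
)
from schema.charts.dagster.values import DagsterHelmValues
from schema.charts.utils import kubernetes
from schema.utils.helm_template import HelmTemplate
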
def test_celery_queue_empty_run_launcher_config_source(
        deployment_template: HelmTemplate,
        celery_queue_configmap_template: HelmTemplate):
    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2,
            "configSource": {
                "worker_concurrency": 3
            }
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1,
            "configSource": {
                "worker_concurrency": 4
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    workerQueues=[
                        CeleryWorkerQueue(**workerQueue)
                        for workerQueue in workerQueues
                    ], )),
        ))

    celery_queue_deployments = deployment_template.render(helm_values)

    celery_queue_configmaps = celery_queue_configmap_template.render(
        helm_values)

    assert len(celery_queue_deployments) == 2

    assert len(celery_queue_configmaps) == 2

    dagster_celery = yaml.full_load(
        celery_queue_configmaps[0].data["celery.yaml"])
    extra_queue_celery = yaml.full_load(
        celery_queue_configmaps[1].data["celery.yaml"])

    assert dagster_celery["execution"]["celery"][
        "config_source"] == workerQueues[0]["configSource"]

    assert (extra_queue_celery["execution"]["celery"]["config_source"] ==
            workerQueues[1]["configSource"])
Example #2
def test_celery_queue_image(deployment_template: HelmTemplate):
    repository = "repository"
    tag = "tag"
    helm_values = DagsterHelmValues.construct(runLauncher=RunLauncher(
        type=RunLauncherType.CELERY,
        config=RunLauncherConfig(
            celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                image=kubernetes.Image.construct(repository=repository,
                                                 tag=tag))),
    ))

    celery_queue_deployments = deployment_template.render(helm_values)

    assert len(celery_queue_deployments) == 1

    image = celery_queue_deployments[0].spec.template.spec.containers[0].image
    image_name, image_tag = image.split(":")

    assert image_name == repository
    assert image_tag == tag
Example #3
def test_k8s_run_launcher_fail_pod_on_run_failure(template: HelmTemplate):
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    imagePullPolicy="Always",
                    loadInclusterConfig=True,
                    envConfigMaps=[],
                    envSecrets=[],
                    envVars=[],
                    volumeMounts=[],
                    volumes=[],
                    failPodOnRunFailure=True,
                )),
        ))
    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["config"]["fail_pod_on_run_failure"]
Example #4
def test_k8s_run_launcher_config(template: HelmTemplate):
    job_namespace = "namespace"
    load_incluster_config = True
    env_config_maps = [{"name": "env_config_map"}]
    env_secrets = [{"name": "secret"}]
    env_vars = ["ENV_VAR"]
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    jobNamespace=job_namespace,
                    loadInclusterConfig=load_incluster_config,
                    envConfigMaps=env_config_maps,
                    envSecrets=env_secrets,
                    envVars=env_vars,
                )
            ),
        )
    )

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_k8s"
    assert run_launcher_config["class"] == "K8sRunLauncher"
    assert run_launcher_config["config"]["job_namespace"] == job_namespace
    assert run_launcher_config["config"]["load_incluster_config"] == load_incluster_config
    assert run_launcher_config["config"]["env_config_maps"][1:] == [
        configmap["name"] for configmap in env_config_maps
    ]
    assert run_launcher_config["config"]["env_secrets"] == [
        secret["name"] for secret in env_secrets
    ]
    assert run_launcher_config["config"]["env_vars"] == env_vars
Example #5
def test_celery_queue_inherit_config_source(
        deployment_template: HelmTemplate,
        celery_queue_configmap_template: HelmTemplate):

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2,
            "additionalCeleryArgs": ["-E", "--concurrency", "16"],
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1,
            "configSource": {
                "worker_concurrency": 4
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    configSource=configSource,
                    workerQueues=[
                        CeleryWorkerQueue(**workerQueue)
                        for workerQueue in workerQueues
                    ],
                )),
        ))

    celery_queue_deployments = deployment_template.render(helm_values)

    celery_queue_configmaps = celery_queue_configmap_template.render(
        helm_values)

    assert len(celery_queue_deployments) == 2

    assert len(celery_queue_configmaps) == 2

    dagster_container_spec = celery_queue_deployments[
        0].spec.template.spec.containers[0]
    assert dagster_container_spec.command == ["dagster-celery"]
    assert dagster_container_spec.args == [
        "worker",
        "start",
        "-A",
        "dagster_celery_k8s.app",
        "-y",
        "/opt/dagster/dagster_home/celery-config.yaml",
        "-q",
        "dagster",
        "--",
        "-E",
        "--concurrency",
        "16",
    ]

    liveness_command = [
        "/bin/sh",
        "-c",
        'dagster-celery status -A dagster_celery_k8s.app -y /opt/dagster/dagster_home/celery-config.yaml | grep "${HOSTNAME}:.*OK"',
    ]

    assert (dagster_container_spec.liveness_probe._exec.command  # pylint: disable=protected-access
            == liveness_command)

    extra_queue_container_spec = celery_queue_deployments[
        1].spec.template.spec.containers[0]
    assert extra_queue_container_spec.command == ["dagster-celery"]
    assert extra_queue_container_spec.args == [
        "worker",
        "start",
        "-A",
        "dagster_celery_k8s.app",
        "-y",
        "/opt/dagster/dagster_home/celery-config.yaml",
        "-q",
        "extra-queue-1",
    ]

    assert (extra_queue_container_spec.liveness_probe._exec.command  # pylint: disable=protected-access
            == liveness_command)

    dagster_celery = yaml.full_load(
        celery_queue_configmaps[0].data["celery.yaml"])
    extra_queue_celery = yaml.full_load(
        celery_queue_configmaps[1].data["celery.yaml"])

    assert dagster_celery["execution"]["celery"]["broker"][
        "env"] == "DAGSTER_CELERY_BROKER_URL"
    assert dagster_celery["execution"]["celery"]["backend"][
        "env"] == "DAGSTER_CELERY_BACKEND_URL"

    assert dagster_celery["execution"]["celery"][
        "config_source"] == configSource

    assert extra_queue_celery["execution"]["celery"]["config_source"] == {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 4,
    }

    assert extra_queue_celery["execution"]["celery"]["broker"][
        "env"] == "DAGSTER_CELERY_BROKER_URL"
    assert (extra_queue_celery["execution"]["celery"]["backend"]["env"] ==
            "DAGSTER_CELERY_BACKEND_URL")
Example #6
def test_celery_queue_volumes(deployment_template: HelmTemplate):
    volume_mounts = [
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    repository = "repository"
    tag = "tag"

    helm_values = DagsterHelmValues.construct(runLauncher=RunLauncher(
        type=RunLauncherType.CELERY,
        config=RunLauncherConfig(
            celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                image=kubernetes.Image.construct(repository=repository,
                                                 tag=tag),
                volumeMounts=volume_mounts,
                volumes=volumes,
            )),
    ))

    celery_queue_deployments = deployment_template.render(helm_values)

    assert len(celery_queue_deployments) == 1

    mounts = celery_queue_deployments[0].spec.template.spec.containers[
        0].volume_mounts

    assert [remove_none_recursively(mount.to_dict()) for mount in mounts] == [
        {
            "mount_path": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "sub_path": "dagster.yaml",
        },
        {
            "mount_path": "/opt/dagster/dagster_home/celery-config.yaml",
            "name": "dagster-celery",
            "sub_path": "celery.yaml",
        },
        {
            "mount_path":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "name": "test-volume",
            "sub_path": "volume_mounted_file.yaml",
        },
    ]

    rendered_volumes = celery_queue_deployments[0].spec.template.spec.volumes

    assert [
        remove_none_recursively(volume.to_dict())
        for volume in rendered_volumes
    ] == [
        {
            "config_map": {
                "name": "release-name-dagster-instance"
            },
            "name": "dagster-instance"
        },
        {
            "config_map": {
                "name": "release-name-dagster-celery-dagster"
            },
            "name": "dagster-celery"
        },
        {
            "name": "test-volume",
            "config_map": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistent_volume_claim": {
                "claim_name": "my_claim",
                "read_only": False
            },
        },
    ]
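The remove_none_recursively helper used above is not shown on this page; in the actual repository it presumably lives in a shared test utility. A minimal sketch of what it likely does, stripping None-valued fields from the dicts produced by the Kubernetes client's to_dict() so they can be compared against plain literals:

def remove_none_recursively(obj):
    # Recursively drop keys whose value is None from dicts, and apply the
    # same cleanup inside lists/tuples; other values pass through untouched.
    if isinstance(obj, dict):
        return {
            key: remove_none_recursively(value)
            for key, value in obj.items()
            if value is not None
        }
    if isinstance(obj, (list, tuple)):
        return type(obj)(remove_none_recursively(item) for item in obj)
    return obj
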
Example #7
def test_k8s_run_launcher_config(template: HelmTemplate):
    job_namespace = "namespace"
    image_pull_policy = "Always"
    load_incluster_config = True
    env_config_maps = [{"name": "env_config_map"}]
    env_secrets = [{"name": "secret"}]
    env_vars = ["ENV_VAR"]
    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.K8S,
            config=RunLauncherConfig.construct(
                k8sRunLauncher=K8sRunLauncherConfig.construct(
                    jobNamespace=job_namespace,
                    imagePullPolicy=image_pull_policy,
                    loadInclusterConfig=load_incluster_config,
                    envConfigMaps=env_config_maps,
                    envSecrets=env_secrets,
                    envVars=env_vars,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_k8s"
    assert run_launcher_config["class"] == "K8sRunLauncher"
    assert run_launcher_config["config"]["job_namespace"] == job_namespace
    assert run_launcher_config["config"][
        "load_incluster_config"] == load_incluster_config
    assert run_launcher_config["config"][
        "image_pull_policy"] == image_pull_policy
    assert run_launcher_config["config"]["env_config_maps"][1:] == [
        configmap["name"] for configmap in env_config_maps
    ]
    assert run_launcher_config["config"]["env_secrets"] == [
        secret["name"] for secret in env_secrets
    ]
    assert run_launcher_config["config"]["env_vars"] == env_vars
    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert "fail_pod_on_run_failure" not in run_launcher_config["config"]
Example #8
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
    image = {
        "repository": "test_repo",
        "tag": "test_tag",
        "pullPolicy": "Always"
    }

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1
        },
    ]

    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    labels = {"my_label_key": "my_label_value"}

    image_pull_secrets = [{"name": "IMAGE_PULL_SECRET"}]

    helm_values = DagsterHelmValues.construct(
        imagePullSecrets=image_pull_secrets,
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    labels=labels,
                )),
        ),
    )

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_celery_k8s"
    assert run_launcher_config["class"] == "CeleryK8sRunLauncher"

    assert run_launcher_config["config"]["config_source"] == configSource

    assert run_launcher_config["config"]["broker"] == {
        "env": "DAGSTER_CELERY_BROKER_URL"
    }

    assert run_launcher_config["config"]["backend"] == {
        "env": "DAGSTER_CELERY_BACKEND_URL"
    }

    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
    assert run_launcher_config["config"]["labels"] == labels

    assert run_launcher_config["config"][
        "image_pull_secrets"] == image_pull_secrets

    assert run_launcher_config["config"]["image_pull_policy"] == "Always"

    assert run_launcher_config["config"][
        "service_account_name"] == "release-name-dagster"

    assert "fail_pod_on_run_failure" not in run_launcher_config["config"]

    helm_values_with_image_pull_policy = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                    imagePullPolicy="IfNotPresent",
                )),
        ), )

    configmaps = template.render(helm_values_with_image_pull_policy)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["image_pull_policy"] == "IfNotPresent"

    helm_values_with_fail_pod_on_run_failure = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    failPodOnRunFailure=True,
                )),
        ), )

    configmaps = template.render(helm_values_with_fail_pod_on_run_failure)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]
    assert run_launcher_config["config"]["fail_pod_on_run_failure"]
Example #9
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
    image = {
        "repository": "test_repo",
        "tag": "test_tag",
        "pullPolicy": "Always"
    }

    configSource = {
        "broker_transport_options": {
            "priority_steps": [9]
        },
        "worker_concurrency": 1,
    }

    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1
        },
    ]

    volume_mounts = [
        {
            "mountPath": "/opt/dagster/dagster_home/dagster.yaml",
            "name": "dagster-instance",
            "subPath": "dagster.yaml",
        },
        {
            "name": "test-volume",
            "mountPath":
            "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
            "subPath": "volume_mounted_file.yaml",
        },
    ]

    volumes = [
        {
            "name": "test-volume",
            "configMap": {
                "name": "test-volume-configmap"
            }
        },
        {
            "name": "test-pvc",
            "persistentVolumeClaim": {
                "claimName": "my_claim",
                "readOnly": False
            }
        },
    ]

    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    image=image,
                    configSource=configSource,
                    workerQueues=workerQueues,
                    volumeMounts=volume_mounts,
                    volumes=volumes,
                )),
        ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_launcher_config = instance["run_launcher"]

    assert run_launcher_config["module"] == "dagster_celery_k8s"
    assert run_launcher_config["class"] == "CeleryK8sRunLauncher"

    assert run_launcher_config["config"]["config_source"] == configSource

    assert run_launcher_config["config"]["broker"] == {
        "env": "DAGSTER_CELERY_BROKER_URL"
    }

    assert run_launcher_config["config"]["backend"] == {
        "env": "DAGSTER_CELERY_BACKEND_URL"
    }

    assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
    assert run_launcher_config["config"]["volumes"] == volumes
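The template, deployment_template, and celery_queue_configmap_template fixtures referenced throughout these examples are defined elsewhere in the test suite. A minimal sketch of what they might look like, assuming HelmTemplate takes the chart directory, the template file to render, and the Kubernetes client model to deserialize into (the actual constructor arguments in the Dagster repository may differ):

import pytest
from kubernetes.client import models

@pytest.fixture(name="template")
def instance_configmap_template() -> HelmTemplate:
    # Renders the Dagster instance ConfigMap containing dagster.yaml.
    return HelmTemplate(
        helm_dir_path="helm/dagster",  # assumed chart location
        subchart_paths=["charts/dagster-user-deployments"],
        output="templates/configmap-instance.yaml",
        model=models.V1ConfigMap,
    )

@pytest.fixture(name="deployment_template")
def celery_deployment_template() -> HelmTemplate:
    # Renders one Deployment per configured Celery worker queue.
    return HelmTemplate(
        helm_dir_path="helm/dagster",
        subchart_paths=["charts/dagster-user-deployments"],
        output="templates/deployment-celery-queues.yaml",
        model=models.V1Deployment,
    )

@pytest.fixture(name="celery_queue_configmap_template")
def celery_configmap_template() -> HelmTemplate:
    # Renders one celery.yaml ConfigMap per configured worker queue.
    return HelmTemplate(
        helm_dir_path="helm/dagster",
        subchart_paths=["charts/dagster-user-deployments"],
        output="templates/configmap-celery.yaml",
        model=models.V1ConfigMap,
    )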