예제 #1
0
def test_celery_secret_does_not_render_without_celery_run_launcher(
        template: HelmTemplate):
    """Rendering the Celery config secret fails when no Celery run launcher is set."""
    values = DagsterHelmValues.construct(generateCeleryConfigSecret=True)

    # helm exits non-zero for this combination, surfacing as CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError):
        template.render(values)
예제 #2
0
def test_deployments_do_not_render(helm_values: DagsterHelmValues,
                                   template: HelmTemplate, capsys):
    # Rendering with these fixture values is expected to make `helm template`
    # exit non-zero, which the render helper surfaces as CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError):
        template.render(helm_values)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # leaves the `with` block before these lines run. If the stderr check
        # matters, capture and assert outside the context manager.
        _, err = capsys.readouterr()
        assert "Error: could not find template" in err
예제 #3
0
def test_user_deployment_checksum_changes(template: HelmTemplate):
    """Changing a user deployment's config must change its checksum annotation,
    so a Helm upgrade rolls the pods."""
    names = ["deployment-one", "deployment-two"]

    before_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=True,
            enableSubchart=True,
            deployments=[create_simple_user_deployment(name) for name in names],
        ))
    after_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=True,
            enableSubchart=True,
            deployments=[create_complex_user_deployment(name) for name in names],
        ))

    before_templates = template.render(before_values)
    after_templates = template.render(after_values)

    # Different Helm values should yield different checksums, forcing a redeploy.
    annotation_key = "checksum/dagster-user-deployment"
    for before, after in zip(before_templates, after_templates):
        checksum_before = before.spec.template.metadata.annotations[annotation_key]
        checksum_after = after.spec.template.metadata.annotations[annotation_key]
        assert checksum_before != checksum_after
예제 #4
0
def test_job_instance_migrate_does_not_render(template: HelmTemplate, capsys):
    # No migrate job should render when migration is disabled; helm exits
    # non-zero and the render helper raises CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError):
        helm_values_migrate_disabled = DagsterHelmValues.construct(migrate=Migrate(enabled=False))

        template.render(helm_values_migrate_disabled)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # exits the `with` block before these lines run. Capture and assert
        # outside the context manager if the stderr check matters.
        _, err = capsys.readouterr()
        assert "Error: could not find template" in err
예제 #5
0
def test_postgresql_secret_does_not_render(template: HelmTemplate, capsys):
    # With password-secret generation disabled, the secret template should not
    # render; helm exits non-zero and the render helper raises.
    with pytest.raises(subprocess.CalledProcessError):
        helm_values_generate_postgresql_secret_disabled = DagsterHelmValues.construct(
            generatePostgresqlPasswordSecret=False)

        template.render(helm_values_generate_postgresql_secret_disabled)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # exits the `with` block before these lines run. Capture and assert
        # outside the context manager if the stderr check matters.
        _, err = capsys.readouterr()
        assert "Error: could not find template" in err
예제 #6
0
def test_service_account_does_not_render(template: HelmTemplate, capsys):
    # With `create=False` the service account template should not render;
    # helm exits non-zero and the render helper raises.
    with pytest.raises(subprocess.CalledProcessError):
        service_account_values = DagsterHelmValues.construct(
            serviceAccount=ServiceAccount.construct(
                name="service-account-name", create=False), )

        template.render(service_account_values)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # exits the `with` block before these lines run. Capture and assert
        # outside the context manager if the stderr check matters.
        _, err = capsys.readouterr()

        assert "Error: could not find template" in err
예제 #7
0
def test_subchart_service_account_global_name(subchart_template: HelmTemplate, capsys):
    # When a global service account name is supplied, the subchart should not
    # render its own service account; helm exits non-zero and render raises.
    with pytest.raises(subprocess.CalledProcessError):
        global_service_account_name = "global-service-account-name"
        service_account_values = DagsterHelmValues.construct(
            global_=Global.construct(serviceAccountName=global_service_account_name),
        )

        subchart_template.render(service_account_values)

        # NOTE(review): unreachable -- `subchart_template.render` raises, so
        # execution exits the `with` block before these lines run. Capture and
        # assert outside the context manager if the stderr check matters.
        _, err = capsys.readouterr()

        assert "Error: could not find template" in err
예제 #8
0
def test_workspace_does_not_render(template: HelmTemplate, capsys):
    """The workspace configmap should not render when user deployments and the
    subchart are both disabled; helm exits non-zero and render raises."""
    helm_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=False,
            enableSubchart=False,
            deployments=[create_simple_user_deployment("deployment-one")],
        ))

    with pytest.raises(subprocess.CalledProcessError):
        template.render(helm_values)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # exits the `with` block before these lines run. Capture and assert
        # outside the context manager if the stderr check matters.
        # Fixed: the original `... in err in err` was an accidental chained
        # comparison (`("..." in err) and (err in err)`).
        _, err = capsys.readouterr()
        assert "Error: could not find template" in err
예제 #9
0
def test_celery_queue_empty_run_launcher_config_source(
        deployment_template: HelmTemplate,
        celery_queue_configmap_template: HelmTemplate):
    """Each Celery worker queue gets its own deployment and configmap, and the
    per-queue config_source is propagated into celery.yaml."""
    workerQueues = [
        {
            "name": "dagster",
            "replicaCount": 2,
            "configSource": {
                "worker_concurrency": 3
            }
        },
        {
            "name": "extra-queue-1",
            "replicaCount": 1,
            "configSource": {
                "worker_concurrency": 4
            }
        },
    ]

    queue_models = [CeleryWorkerQueue(**queue) for queue in workerQueues]
    helm_values = DagsterHelmValues.construct(
        runLauncher=RunLauncher.construct(
            type=RunLauncherType.CELERY,
            config=RunLauncherConfig.construct(
                celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
                    workerQueues=queue_models)),
        ))

    deployments = deployment_template.render(helm_values)
    configmaps = celery_queue_configmap_template.render(helm_values)

    # One deployment and one configmap per worker queue.
    assert len(deployments) == 2
    assert len(configmaps) == 2

    # Each rendered configmap's celery config_source matches its input queue.
    for configmap, queue in zip(configmaps, workerQueues):
        celery_config = yaml.full_load(configmap.data["celery.yaml"])
        assert celery_config["execution"]["celery"]["config_source"] == queue[
            "configSource"]
예제 #10
0
def test_user_deployment_checksum_unchanged(helm_values: DagsterHelmValues,
                                            template: HelmTemplate):
    """Identical Helm values must yield identical checksum annotations, so a
    no-op upgrade does not redeploy user deployments."""
    first_render = template.render(helm_values)
    second_render = template.render(helm_values)

    annotation_key = "checksum/dagster-user-deployment"
    for first, second in zip(first_render, second_render):
        assert (first.spec.template.metadata.annotations[annotation_key] ==
                second.spec.template.metadata.annotations[annotation_key])
예제 #11
0
def test_workspace_renders_fail(template: HelmTemplate, capsys):
    # Enabling the subchart while user deployments are disabled (and empty) is
    # an invalid combination; rendering is expected to fail.
    helm_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=False,
            enableSubchart=True,
            deployments=[],
        ))

    with pytest.raises(subprocess.CalledProcessError):
        template.render(helm_values)

        # NOTE(review): unreachable -- `template.render` raises, so execution
        # exits the `with` block before these lines run. Capture and assert
        # outside the context manager if the message check matters.
        _, err = capsys.readouterr()
        assert (
            "dagster-user-deployments subchart cannot be enabled if workspace.yaml is not created."
            in err)
예제 #12
0
def test_queued_run_coordinator(instance_template: HelmTemplate, ):  # pylint: disable=redefined-outer-name
    """The queued run coordinator and its tag concurrency limits land in dagster.yaml."""
    limits = [TagConcurrencyLimit.construct(key="foo", value="hi", limit=1)]
    helm_values = DagsterHelmValues.construct(dagsterDaemon=Daemon.construct(
        runCoordinator=RunCoordinator.construct(
            enabled=True,
            type=RunCoordinatorType.QUEUED,
            config=RunCoordinatorConfig.construct(
                queuedRunCoordinator=QueuedRunCoordinatorConfig.construct(
                    tagConcurrencyLimits=limits)),
        )))

    configmaps = instance_template.render(helm_values)
    assert len(configmaps) == 1

    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_coordinator = instance["run_coordinator"]

    assert run_coordinator["class"] == "QueuedRunCoordinator"
    expected_limits = [{"key": "foo", "value": "hi", "limit": 1}]
    assert run_coordinator["config"]["tag_concurrency_limits"] == expected_limits
예제 #13
0
def test_custom_compute_log_manager_config(template: HelmTemplate):
    """A custom compute log manager's module/class/config pass through to dagster.yaml."""
    module = "a_module"
    class_ = "Class"
    config = {"config_field_one": "1", "config_field_two": "two"}

    helm_values = DagsterHelmValues.construct(
        computeLogManager=ComputeLogManager.construct(
            type=ComputeLogManagerType.CUSTOM,
            config=ComputeLogManagerConfig.construct(
                customComputeLogManager=ConfigurableClass.construct(
                    module=module,
                    class_=class_,
                    config=config,
                ))))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    compute_logs = instance["compute_logs"]

    assert compute_logs["module"] == module
    assert compute_logs["class"] == class_
    assert compute_logs["config"] == config
예제 #14
0
def test_gcs_compute_log_manager(template: HelmTemplate):
    """GCS compute log manager settings are mapped onto the dagster.yaml instance config."""
    expected_config = {"bucket": "bucket", "local_dir": "/dir", "prefix": "prefix"}

    helm_values = DagsterHelmValues.construct(
        computeLogManager=ComputeLogManager.construct(
            type=ComputeLogManagerType.GCS,
            config=ComputeLogManagerConfig.construct(
                gcsComputeLogManager=GCSComputeLogManagerModel(
                    bucket=expected_config["bucket"],
                    localDir=expected_config["local_dir"],
                    prefix=expected_config["prefix"],
                ))))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    compute_logs = instance["compute_logs"]

    assert compute_logs["module"] == "dagster_gcp.gcs.compute_log_manager"
    assert compute_logs["class"] == "GCSComputeLogManager"
    assert compute_logs["config"] == expected_config

    # Every field of the configurable class should be covered by the Helm schema.
    assert compute_logs["config"].keys() == GCSComputeLogManager.config_type().keys()
예제 #15
0
def test_workspace_renders_from_helm_user_deployments(template: HelmTemplate):
    """Each user deployment becomes a gRPC server entry in workspace.yaml."""
    deployments = [
        create_simple_user_deployment("deployment-one"),
        create_simple_user_deployment("deployment-two"),
    ]
    helm_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=True,
            enableSubchart=True,
            deployments=deployments,
        ))

    rendered = template.render(helm_values)
    assert len(rendered) == 1

    workspace = yaml.full_load(rendered[0].data["workspace.yaml"])
    server_entries = workspace["load_from"]
    assert len(server_entries) == len(deployments)

    for entry, deployment in zip(server_entries, deployments):
        grpc = entry["grpc_server"]
        assert grpc["host"] == deployment.name
        assert grpc["port"] == deployment.port
        assert grpc["location_name"] == deployment.name
예제 #16
0
def test_custom_run_coordinator_config(template: HelmTemplate):
    """A custom run coordinator's module/class/config are written verbatim to dagster.yaml."""
    module = "a_module"
    class_ = "Class"
    config = {
        "config_field_one": "1",
        "config_field_two": "two",
    }

    helm_values = DagsterHelmValues.construct(dagsterDaemon=Daemon.construct(
        runCoordinator=RunCoordinator.construct(
            enabled=True,
            type=RunCoordinatorType.CUSTOM,
            config=RunCoordinatorConfig.construct(
                customRunCoordinator=ConfigurableClass.construct(
                    module=module,
                    class_=class_,
                    config=config,
                )),
        )))

    configmaps = template.render(helm_values)
    assert len(configmaps) == 1

    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    run_coordinator = instance["run_coordinator"]

    assert run_coordinator["module"] == module
    assert run_coordinator["class"] == class_
    assert run_coordinator["config"] == config
예제 #17
0
def test_workspace_renders_from_helm_dagit(template: HelmTemplate):
    """An explicit Dagit workspace server list wins over the user-deployment hosts."""
    servers = [
        Server(host="another-deployment-one", port=4000),
        Server(host="another-deployment-two", port=4001),
        Server(host="another-deployment-three", port=4002),
    ]
    helm_values = DagsterHelmValues.construct(
        dagit=Dagit.construct(
            workspace=Workspace(enabled=True, servers=servers)),
        dagsterUserDeployments=UserDeployments(
            enabled=True,
            enableSubchart=True,
            deployments=[
                create_simple_user_deployment(name)
                for name in ["deployment-one", "deployment-two"]
            ],
        ),
    )

    rendered = template.render(helm_values)
    assert len(rendered) == 1

    workspace = yaml.full_load(rendered[0].data["workspace.yaml"])
    entries = workspace["load_from"]
    assert len(entries) == len(servers)

    for entry, server in zip(entries, servers):
        grpc = entry["grpc_server"]
        assert grpc["host"] == server.host
        assert grpc["port"] == server.port
        assert grpc["location_name"] == server.host
예제 #18
0
def test_ingress_read_only(template: HelmTemplate):
    """Enabling read-only Dagit adds a second ingress rule for the read-only host."""

    def ssl_redirect_path():
        # The same redirect path is attached to both the main and read-only hosts.
        return IngressPath(path="/*",
                           serviceName="ssl-redirect",
                           servicePort="use-annotion")

    helm_values = DagsterHelmValues.construct(
        ingress=Ingress.construct(
            enabled=True,
            dagit=DagitIngressConfiguration.construct(
                host="foobar.com",
                path="bing",
                precedingPaths=[ssl_redirect_path()],
            ),
            readOnlyDagit=DagitIngressConfiguration.construct(
                host="dagster.io",
                succeedingPaths=[ssl_redirect_path()],
            ),
        ),
        dagit=Dagit.construct(enableReadOnly=True),
    )

    rendered = template.render(helm_values)
    assert len(rendered) == 1

    rules = rendered[0].spec.rules
    assert len(rules) == 2
    assert [rule.host for rule in rules] == ["foobar.com", "dagster.io"]
예제 #19
0
def test_postgresql_secret_renders(template: HelmTemplate):
    """Exactly one secret renders when postgresql password generation is enabled."""
    values = DagsterHelmValues.construct(generatePostgresqlPasswordSecret=True)

    rendered = template.render(values)

    assert len(rendered) == 1
예제 #20
0
def test_storage_postgres_db_config(template: HelmTemplate, storage: str):
    """All postgresql connection settings flow into the storage section of dagster.yaml."""
    username = "******"
    host = "1.1.1.1"
    database = "database"
    params = {
        "connect_timeout": 10,
        "application_name": "myapp",
        "options": "-c synchronous_commit=off",
    }
    port = 8080

    helm_values = DagsterHelmValues.construct(postgresql=PostgreSQL.construct(
        postgresqlUsername=username,
        postgresqlHost=host,
        postgresqlDatabase=database,
        postgresqlParams=params,
        service=Service(port=port),
    ))

    configmaps = template.render(helm_values)
    assert len(configmaps) == 1

    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    assert instance[storage]

    postgres_db = instance[storage]["config"]["postgres_db"]
    assert postgres_db["username"] == username
    # The password is always sourced from the env var, never inlined.
    assert postgres_db["password"] == {"env": "DAGSTER_PG_PASSWORD"}
    assert postgres_db["hostname"] == host
    assert postgres_db["db_name"] == database
    assert postgres_db["port"] == port
    assert postgres_db["params"] == params
예제 #21
0
def test_job_instance_migrate_renders(template: HelmTemplate):
    """Exactly one migrate job renders when migration is enabled."""
    values = DagsterHelmValues.construct(migrate=Migrate(enabled=True))

    rendered = template.render(values)

    assert len(rendered) == 1
예제 #22
0
def test_custom_python_logs_config(template: HelmTemplate):
    """python_logs settings (level, managed loggers, handler config) are passed through."""
    log_level = "INFO"
    loggers = ["foo", "bar", "baz"]
    handler_config = {
        "handlers": {
            "myHandler": {
                "class": "logging.StreamHandler",
                "level": "INFO",
                "stream": "foo"
            }
        },
        "formatters": {
            "myFormatter": {
                "format": "%(message)s"
            }
        },
    }

    helm_values = DagsterHelmValues.construct(pythonLogs=PythonLogs.construct(
        pythonLogLevel=log_level,
        managedPythonLoggers=loggers,
        dagsterHandlerConfig=handler_config,
    ))

    configmaps = template.render(helm_values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    python_logs = instance["python_logs"]

    assert python_logs["python_log_level"] == log_level
    assert python_logs["managed_python_loggers"] == loggers
    assert python_logs["dagster_handler_config"] == handler_config
예제 #23
0
def test_celery_backend_with_redis_with_password(template: HelmTemplate):
    """Broker and backend URLs embed the redis password and db numbers, base64-encoded."""
    password = "******"
    host = "host"
    port = 6379
    broker_db = 20
    backend_db = 21

    helm_values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
        redis=Redis.construct(
            enabled=True,
            usePassword=True,
            password=password,
            host=host,
            port=port,
            brokerDbNumber=broker_db,
            backendDbNumber=backend_db,
        ),
    )

    [secret] = template.render(helm_values)

    def encode(url):
        # Secret data is base64-encoded in the rendered manifest.
        return base64.b64encode(url.encode("utf-8")).decode("utf-8")

    broker_url = f"redis://:{password}@{host}:{port}/{broker_db}"
    backend_url = f"redis://:{password}@{host}:{port}/{backend_db}"

    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == encode(broker_url)
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == encode(backend_url)
예제 #24
0
def test_chart_does_not_render(full_template: HelmTemplate, capsys):
    # Enabling the subchart while user deployments are disabled is an invalid
    # combination; rendering the full chart is expected to fail.
    helm_values = DagsterHelmValues.construct(
        dagsterUserDeployments=UserDeployments(
            enabled=False,
            enableSubchart=True,
            deployments=[
                create_simple_user_deployment("simple-deployment-one")
            ],
        ))

    with pytest.raises(subprocess.CalledProcessError):
        full_template.render(helm_values)

        # NOTE(review): unreachable -- `full_template.render` raises, so
        # execution exits the `with` block before these lines run. Capture and
        # assert outside the context manager if the message check matters.
        _, err = capsys.readouterr()
        assert (
            "dagster-user-deployments subchart cannot be enabled if workspace.yaml is not created."
            in err)
예제 #25
0
def test_dagit_read_only_disabled(deployment_template: HelmTemplate):
    """By default, the dagit container is not started with --read-only."""
    rendered = deployment_template.render(DagsterHelmValues.construct())

    assert len(rendered) == 1

    command = "".join(rendered[0].spec.template.spec.containers[0].command)
    assert "--read-only" not in command
예제 #26
0
def test_telemetry(template: HelmTemplate, enabled: bool):
    """The telemetry.enabled flag is forwarded verbatim to dagster.yaml."""
    values = DagsterHelmValues.construct(
        telemetry=Telemetry.construct(enabled=enabled))

    configmaps = template.render(values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])

    assert instance.get("telemetry")["enabled"] == enabled
예제 #27
0
def test_celery_secret_renders(template: HelmTemplate):
    """The Celery config secret renders when enabled with the Celery run launcher."""
    values = DagsterHelmValues.construct(
        generateCeleryConfigSecret=True,
        runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
    )

    rendered = template.render(values)

    assert len(rendered) == 1
예제 #28
0
def test_dagit_db_statement_timeout(deployment_template: HelmTemplate):
    """dbStatementTimeout is passed to dagit via the --db-statement-timeout flag."""
    timeout_ms = 9000
    values = DagsterHelmValues.construct(
        dagit=Dagit.construct(dbStatementTimeout=timeout_ms))

    deployments = deployment_template.render(values)
    container_command = " ".join(
        deployments[0].spec.template.spec.containers[0].command)

    assert f"--db-statement-timeout {timeout_ms}" in container_command
예제 #29
0
def test_celery_backend_override_only_one(template: HelmTemplate):
    """Overriding only one of broker/backend URL leaves the other at its default."""
    custom_url = "host:6380,password=password,ssl=True"
    default_url = "redis://myhost:6379/0"

    def encode(url):
        # Secret data is base64-encoded in the rendered manifest.
        return base64.b64encode(bytes(url, encoding="utf-8")).decode("utf-8")

    def render_secret(redis):
        [secret] = template.render(
            DagsterHelmValues.construct(
                generateCeleryConfigSecret=True,
                runLauncher=RunLauncher.construct(type=RunLauncherType.CELERY),
                redis=redis,
            ))
        return secret

    # Override broker, not backend
    secret = render_secret(
        Redis.construct(enabled=True, brokerUrl=custom_url, host="myhost"))
    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == encode(custom_url)
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == encode(default_url)

    # Override backend, not broker
    secret = render_secret(
        Redis.construct(enabled=True, backendUrl=custom_url, host="myhost"))
    assert secret.data["DAGSTER_CELERY_BROKER_URL"] == encode(default_url)
    assert secret.data["DAGSTER_CELERY_BACKEND_URL"] == encode(custom_url)
예제 #30
0
def test_custom_python_logs_missing_config(template: HelmTemplate):
    """Only the log level is emitted when the other python_logs fields are unset."""
    values = DagsterHelmValues.construct(
        pythonLogs=PythonLogs.construct(pythonLogLevel="INFO"))

    configmaps = template.render(values)
    instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
    python_logs = instance["python_logs"]

    assert python_logs["python_log_level"] == "INFO"
    assert "managed_python_loggers" not in python_logs
    assert "dagster_handler_config" not in python_logs