# NOTE(review): this chunk begins mid-test (the enclosing test function and the
# first two inserts are above this view) and indentation was lost; formatting
# below is reconstructed. "x2" / the '3000' expectation presumably follow two
# earlier 1000-row inserts — confirm against the full file.
with Then('insert data x2'):
    # Insert a third batch of 1000 rows on replica 0-0 ...
    clickhouse.query(
        chi['metadata']['name'],
        'INSERT INTO default.zk_repl SELECT number*3 FROM numbers(1000)',
        pod="chi-test-cluster-for-zk-default-0-0-0")
    # ... and verify replication delivered all 3x1000 rows to replica 0-1.
    assert clickhouse.query(
        chi['metadata']['name'],
        'SELECT count() FROM default.zk_repl',
        pod="chi-test-cluster-for-zk-default-0-1-0"
    ) == '3000', "Invalid rows after 3x1000 inserts"

# Cleanup: drop the replicated test table on every shard.
clickhouse.drop_table_on_cluster(chi, 'all-sharded', 'default.zk_repl')

if main():
    with Module("main"):
        # Bring up a ClickHouse cluster plus ZooKeeper from the given manifests.
        clickhouse_operator_spec, chi = util.install_clickhouse_and_zookeeper(
            chi_file='configs/test-cluster-for-zookeeper.yaml',
            chi_template_file='templates/tpl-clickhouse-latest.yaml',
            chi_name='test-cluster-for-zk',
        )
        util.wait_clickhouse_cluster_ready(chi)

        # Entries are either a bare test callable or a (test, args) tuple.
        all_tests = [test_zookeeper_rescale]
        for t in all_tests:
            if callable(t):
                Scenario(test=t)()
            else:
                Scenario(test=t[0], args=t[1])()
@Name("Empty installation, creates 1 node") def test_examples01_1(): create_and_check("../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml", {"object_counts": [1,1,2]}) @TestScenario @Name("1 shard 2 replicas") def test_examples01_2(): create_and_check("../docs/chi-examples/01-simple-layout-02-1shard-2repl.yaml", {"object_counts": [2,2,3]}) @TestScenario @Name("Persistent volume mapping via defaults") def test_examples02_1(): create_and_check("../docs/chi-examples/03-persistent-volume-01-default-volume.yaml", {"pod_count": 1, "pod_volumes": {"/var/lib/clickhouse", "/var/log/clickhouse-server"}}) @TestScenario @Name("Persistent volume mapping via podTemplate") def test_examples02_2(): create_and_check("../docs/chi-examples/03-persistent-volume-02-pod-template.yaml", {"pod_count": 1, "pod_image": "yandex/clickhouse-server:19.3.7", "pod_volumes": {"/var/lib/clickhouse", "/var/log/clickhouse-server"}}) if main(): with Module("examples", flags=TE): examples = [test_examples01_1, test_examples01_2, test_examples02_1, test_examples02_2] for t in examples: run(test=t, flags=TE)
from clickhouse import *
from kubectl import *
import settings
from test_operator import *
from test_clickhouse import *
from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args
from testflows.asserts import error

if main():
    with Module("main", flags=TE):
        # Recreate the test namespace from scratch for a clean run.
        with Given(f"Clean namespace {settings.test_namespace}"):
            kube_deletens(settings.test_namespace)
            kube_createns(settings.test_namespace)

        with Given(f"clickhouse-operator version {settings.version} is installed"):
            # Install the operator only if no operator pod is already running.
            if kube_get_count("pod", ns='kube-system', label="-l app=clickhouse-operator") == 0:
                config = get_full_path(
                    '../deploy/operator/clickhouse-operator-install-template.yaml'
                )
                # Renders the install template via envsubst using bash process
                # substitution, then applies it.
                kube_apply(
                    f"<(cat {config} | "
                    f"OPERATOR_IMAGE=\"altinity/clickhouse-operator:{settings.version}\" "
                    f"OPERATOR_NAMESPACE=\"kube-system\" "
                    f"METRICS_EXPORTER_IMAGE=\"altinity/metrics-exporter:{settings.version}\" "
                    f"METRICS_EXPORTER_NAMESPACE=\"kube-system\" "
                    f"envsubst)",
                    # NOTE(review): chunk is truncated here mid-call; remaining
                    # arguments are outside this view.
@TestScenario
@Name("Test template with custom clickhouse ports")
def test_007():
    # Applies a pod template that remaps http/tcp/interserver ports and
    # verifies the pod exposes exactly those ports.
    create_and_check(
        "configs/test-007-custom-ports.yaml",
        {
            "object_counts": [1, 1, 2],
            "apply_templates": {"configs/tpl-custom-ports.yaml"},
            "pod_image": "yandex/clickhouse-server:19.11.8.46",
            "pod_ports": [8124, 9001, 9010]
        })

if main():
    with Module("regression"):
        # NOTE(review): "in installed" looks like a typo for "is installed";
        # left unchanged here since it is a runtime string.
        with Given("clickhouse-operator in installed"):
            assert kube_get_count("pod", "kube-system", "-l app=clickhouse-operator") > 0, error()
        with And(f"Clean namespace {namespace}"):
            kube_deletens(namespace)
        with And(f"Create namespace {namespace}"):
            kube_createns(namespace)

        tests = [
            test_001,
            test_002,
            test_003,
            test_004,
            test_005,
            test_006,
            test_007
        ]
        # NOTE(review): chunk truncated below — the examples list is not closed
        # in this view and the loop that runs both lists is outside it.
        examples = [
            test_examples01_1,
            test_examples01_2,
            test_examples02_1,
            test_examples02_2
# NOTE(review): chunk begins mid-test (the enclosing alert-test function is
# above this view); formatting reconstructed.
restart_zookeeper()
with Then("check ZookeeperRestartRecently firing"):
    # The alert must fire shortly after the restart; poll within a 30s range.
    fired = wait_alert_state("ZookeeperRestartRecently", "firing", True,
                             labels={"pod": zookeeper_pod}, time_range='30s', sleep_time=5)
    assert fired, error("can't get ZookeeperRestartRecently alert in firing state")

wait_when_zookeeper_up()
with Then("check ZookeeperRestartRecently gone away"):
    # Once ZooKeeper is back up the alert must resolve.
    resolved = wait_alert_state("ZookeeperRestartRecently", "firing", False,
                                labels={"pod": zookeeper_pod})
    assert resolved, error("can't check ZookeeperRestartRecently alert is gone away")

if main():
    with Module("main"):
        # Discover the prometheus-operator / alertmanager / prometheus pods by
        # label so later steps can query and assert on alert state.
        with Given("get information about prometheus installation"):
            prometheus_operator_spec = kubectl.get(
                "pod", ns=settings.prometheus_namespace, name="",
                label="-l app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator"
            )
            alertmanager_spec = kubectl.get(
                "pod", ns=settings.prometheus_namespace, name="",
                label="-l app=alertmanager,alertmanager=alertmanager"
            )
            prometheus_spec = kubectl.get(
                "pod", ns=settings.prometheus_namespace, name="",
                label="-l app=prometheus,prometheus=prometheus"
            )
"do_not_delete": 1 }) with Then("user2/networks should be in config"): chi = kube_get("chi", "test-018-configmap") assert "user2/networks/ip" in chi["spec"]["configuration"]["users"] with And("user1/networks/ip should NOT be in config"): assert "user1/networks/ip" not in chi["spec"]["configuration"][ "users"] kube_delete_chi("test-018-configmap") # End of test scenarios if main(): with Module("main", flags=TE): with Given(f"Clean namespace {settings.test_namespace}"): kube_deletens(settings.test_namespace) with And(f"Create namespace {settings.test_namespace}"): kube_createns(settings.test_namespace) with Given(f"ClickHouse template {settings.clickhouse_template}"): kube_apply(get_full_path(settings.clickhouse_template), settings.test_namespace) with And(f"ClickHouse version {settings.clickhouse_version}"): 1 == 1 with Given("clickhouse-operator is installed"): if kube_get_count("pod", ns='kube-system', label="-l app=clickhouse-operator") == 0:
"pod_volumes": { "/var/lib/clickhouse", "/var/log/clickhouse-server", }, }) @TestScenario @Name("Persistent volume mapping via podTemplate") def test_examples02_2(): kubectl.create_and_check( config="../docs/chi-examples/03-persistent-volume-02-pod-template.yaml", check={ "pod_count": 1, "pod_image": "yandex/clickhouse-server:19.3.7", "pod_volumes": { "/var/lib/clickhouse", "/var/log/clickhouse-server", }, }) if main(): with Module("examples"): examples = [ test_examples01_1, test_examples01_2, test_examples02_1, test_examples02_2 ] for t in examples: Scenario(test=t)
import kubectl
import settings
import test_operator
import test_clickhouse
import util
from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args, Fail, Error
from testflows.asserts import error

if main():
    with Module("main"):
        # Recreate the test namespace; delete CHIs first so operator finalizers
        # do not block namespace deletion, and tolerate an already-absent ns.
        with Given(f"Clean namespace {settings.test_namespace}"):
            kubectl.delete_all_chi(settings.test_namespace)
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True)
            kubectl.create_ns(settings.test_namespace)

        with Given(f"clickhouse-operator version {settings.operator_version} is installed"):
            # Install the operator only when no operator pod is running.
            if kubectl.get_count("pod", ns=settings.operator_namespace, label="-l app=clickhouse-operator") == 0:
                config = util.get_full_path(
                    '../deploy/operator/clickhouse-operator-install-template.yaml'
                )
                # Render the install template via envsubst (bash process
                # substitution) with image/namespace variables from settings.
                kubectl.apply(
                    ns=settings.operator_namespace,
                    config=f"<(cat {config} | "
                           f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
                           f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                           f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
                           # NOTE(review): chunk truncated here mid f-string
                           # sequence; the remaining variables are outside this view.
True,
'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True,
'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True,
'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-2-0': True,
'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-3-0': True,
})
# NOTE(review): chunk begins mid-dict (a check_monitoring_metrics expect_result
# mapping opens above this view); formatting reconstructed. Keys map metric
# line prefixes to whether they must be present in /metrics.
with Then("check empty /metrics after delete namespace"):
    # Deleting the namespace removes all CHI pods, so the per-host version
    # metric must disappear from the exporter output.
    kubectl.kube_deletens(kubectl.namespace)
    check_monitoring_metrics(
        operator_namespace,
        operator_pod,
        expect_result={
            'chi_clickhouse_metric_VersionInteger': False,
        })

if main():
    with Module("metrics_exporter", flags=TE):
        test_cases = [
            test_metrics_exporter_setup,
            test_metrics_exporter_reboot,
            test_metrics_exporter_with_multiple_clickhouse_version,
        ]
        for t in test_cases:
            run(test=t, flags=TE)
# kubectl patch chi test-014-replication -n test --type=json -p '[{"op":"add", "path": "/spec/configuration/clusters/0/layout/shards/0/replicasCount", "value": 3}]' with Then("Replicated table should be automatically created"): out = clickhouse_query("test-014-replication", "select a from t", host="chi-test-014-replication-default-0-2") assert out == "1" create_and_check("configs/test-014-replication.yaml", {}) kubectl.namespace = "test" version = "latest" clickhouse_stable = "yandex/clickhouse-server:19.16.10.44" if main(): with Module("main"): with Given("clickhouse-operator is installed"): # assert kube_get_count("pod", ns = "kube-system", label = "-l app=clickhouse-operator")>0, error() with And(f"Set operator version {version}"): set_operator_version(version) with And(f"Clean namespace {kubectl.namespace}"): kube_deletens(kubectl.namespace) with And(f"Create namespace {kubectl.namespace}"): kube_createns(kubectl.namespace) with Module("examples", flags=TE): examples = [ test_examples01_1, test_examples01_2, test_examples02_1, test_examples02_2 ] for t in examples:
import kubectl
import settings
import test_operator
import util
from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args
from testflows.asserts import error

if main():
    with Module("main"):
        # Recreate the test namespace; CHIs are deleted first so finalizers do
        # not block namespace deletion, and a missing namespace is tolerated.
        with Given(f"Clean namespace {settings.test_namespace}"):
            kubectl.delete_all_chi(settings.test_namespace)
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True)
            kubectl.create_ns(settings.test_namespace)
        # Operator installation is currently disabled (commented out below);
        # presumably the environment is expected to have it pre-installed — confirm.
        # with Given(f"daisy-operator version {settings.operator_version} is installed"):
        #     if kubectl.get_count("pod", ns=settings.operator_namespace, label="-l app=clickhouse-operator") == 0:
        #         config = util.get_full_path('../deploy/operator/clickhouse-operator-install-template.yaml')
        #         kubectl.apply(
        #             ns=settings.operator_namespace,
        #             config=f"<(cat {config} | "
        #                    f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
        #                    f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
        #                    f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
        #                    f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
        #                    f"envsubst)",
        #             validate=False
        #         )
        #         test_operator.set_operator_version(settings.operator_version)
        #
        # with Given(f"Install ClickHouse template {settings.clickhouse_template}"):
from clickhouse import *
from kubectl import *
import settings
from test_operator import *
from test_clickhouse import *
from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args
from testflows.asserts import error

if main():
    with Module("main"):
        # Recreate the test namespace; drop all CHIs first so operator
        # finalizers do not block namespace deletion.
        with Given(f"Clean namespace {settings.test_namespace}"):
            kube_delete_all_chi(settings.test_namespace)
            kube_deletens(settings.test_namespace)
            kube_createns(settings.test_namespace)

        with Given(f"clickhouse-operator version {settings.operator_version} is installed"):
            # Install only when no operator pod is already running.
            if kube_get_count("pod", ns=settings.operator_namespace, label="-l app=clickhouse-operator") == 0:
                config = get_full_path(
                    '../deploy/operator/clickhouse-operator-install-template.yaml'
                )
                # Render the install template via envsubst (bash process
                # substitution) and apply it.
                kube_apply(
                    f"<(cat {config} | "
                    f"OPERATOR_IMAGE=\"altinity/clickhouse-operator:{settings.operator_version}\" "
                    f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"METRICS_EXPORTER_IMAGE=\"altinity/metrics-exporter:{settings.operator_version}\" "
                    f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
                    # NOTE(review): chunk truncated here mid f-string sequence.
], "/regression/parameterized/:/nullable/datatypes/String/ascii": [ (Fail, "Known failure") ], "/regression/parameterized/:/nullable/datatypes/FixedString/utf8": [ (Fail, "Known failure") ], "/regression/parameterized/:/nullable/datatypes/FixedString/ascii": [ (Fail, "Known failure") ], "/regression/parameterized/:/nullable/datatypes/Enum/utf8": [ (Fail, "Known failure") ], "/regression/parameterized/:/nullable/datatypes/Enum/ascii": [ (Fail, "Known failure") ], "/regression/parameterized/:/functions and values/Null": [ (Fail, "Known failure") ], "/regression/parameterized/:/datatypes/String/utf8": [ (Fail, "Known failure") ], "/regression/parameterized/:/datatypes/FixedString/utf8": [ (Fail, "Known failure") ], "/regression/parameterized/:/datatypes/Enum/utf8": [(Fail, "Known failure")] } Module(run=regression, xfails=xfails)
)
# NOTE(review): chunk begins mid-test (the enclosing backup-alert test and the
# call this `)` closes are above this view); formatting reconstructed.
wait_backup_command_status(not_run_pod, f'upload {backup_name}', expected_status='success')
with Then("check ClickhouseBackupDoesntRunTooLong gone away"):
    # After the backup upload succeeds the alert must resolve.
    resolved = alerts.wait_alert_state("ClickhouseBackupDoesntRunTooLong", "firing",
                                       expected_state=False,
                                       labels={"pod_name": not_run_pod})
    assert resolved, error(
        "can't get ClickhouseBackupDoesntRunTooLong alert is gone away")

if main():
    with Module("main"):
        # Stand up prometheus/alertmanager/operator/CHI fixtures for the
        # backup-alert scenarios.
        prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi = alerts.initialize(
            chi_file='configs/test-cluster-for-backups.yaml',
            chi_template_file='templates/tpl-clickhouse-backups.yaml',
            chi_name='test-cluster-for-backups',
        )
        minio_spec = get_minio_spec()

        with Module("backup_alerts"):
            # NOTE(review): chunk truncated below — the list is not closed and
            # the loop running these cases is outside this view.
            test_cases = [
                test_backup_is_success,
                test_backup_is_down,
                test_backup_failed,
                test_backup_duration,
                test_backup_size,
                test_backup_not_run,