def test_examples01_1(self):
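    """Deploy the 01-simple-layout-01-1shard-1repl example and verify the expected
    object counts (1 StatefulSet, 1 pod, 2 services)."""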
    kubectl.create_and_check(
        manifest="../../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml",
        check={"object_counts": {
            "statefulset": 1,
            "pod": 1,
            "service": 2,
        }})
def test_examples02_1(self):
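    """Deploy the 03-persistent-volume-01-default-volume example and verify that one pod
    is created with /var/lib/clickhouse and /var/log/clickhouse-server volumes mounted."""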
    kubectl.create_and_check(
        manifest="../../docs/chi-examples/03-persistent-volume-01-default-volume.yaml",
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/log/clickhouse-server",
            },
        })
def test_examples02_2(self):
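    """Deploy the 03-persistent-volume-02-pod-template example and verify the pod count,
    the clickhouse/clickhouse-server:22.3 image, and the mounted data and log volumes."""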
    kubectl.create_and_check(
        manifest="../../docs/chi-examples/03-persistent-volume-02-pod-template.yaml",
        check={
            "pod_count": 1,
            "pod_image": "clickhouse/clickhouse-server:22.3",
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/log/clickhouse-server",
            },
        })
def test_version_changed(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
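    """Apply a CHI manifest that switches the ClickHouse template, wait for the
    ClickHouseVersionChanged alert to fire for the affected host, then roll the
    settings back and wait for the alert to clear."""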
    changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi)

    with When("apply changed settings"):
        kubectl.create_and_check(
            manifest="manifests/chi/test-cluster-for-alerts-changed-settings.yaml",
            check={
                "apply_templates": [
                    "manifests/chit/tpl-clickhouse-stable.yaml",
                    "manifests/chit/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete": 1
            }
        )
        prometheus_scrape_interval = 15
        with Then(f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"):
            time.sleep(prometheus_scrape_interval * 2)

    with Then("check ClickHouseVersionChanged firing"):
        fired = alerts.wait_alert_state(
            "ClickHouseVersionChanged", "firing", True, labels={"hostname": changed_svc}, time_range="30s", sleep_time=settings.prometheus_scrape_interval
        )
        assert fired, error("can't get ClickHouseVersionChanged alert in firing state")

    with When("rollback changed settings"):
        kubectl.create_and_check(
            manifest="manifests/chi/test-cluster-for-alerts.yaml",
            check={
                "apply_templates": [
                    "manifests/chit/tpl-clickhouse-latest.yaml",
                    "manifests/chit/tpl-clickhouse-alerts.yaml",
                    "manifests/chit/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete": 1
            }
        )

    with Then("check ClickHouseVersionChanged gone away"):
        resolved = alerts.wait_alert_state("ClickHouseVersionChanged", "firing", False, labels={"hostname": changed_svc}, sleep_time=30)
        assert resolved, error("can't check ClickHouseVersionChanged alert is gone away")
def install_clickhouse_and_keeper(chi_file, chi_template_file, chi_name,
                                  keeper_type='zookeeper', keeper_manifest='', force_keeper_install=False, clean_ns=True, keeper_install_first=True,
                                  make_object_count=True):
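    """Install ZooKeeper/clickhouse-keeper and a ClickHouse installation from the given
    CHI manifest, optionally recreating the test namespace and checking object counts
    derived from the cluster layout. Returns the clickhouse-operator pod spec and the
    created chi object."""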
    if keeper_manifest == '':
        if keeper_type == 'zookeeper':
            keeper_manifest = 'zookeeper-1-node-1GB-for-tests-only.yaml'
        if keeper_type == 'clickhouse-keeper':
            keeper_manifest = 'clickhouse-keeper-1-node-256M-for-test-only.yaml'
        if keeper_type == 'zookeeper-operator':
            keeper_manifest = 'zookeeper-operator-1-node.yaml'

    with Given("install zookeeper/clickhouse-keeper + clickhouse"):
        if clean_ns:
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True, timeout=600)
            kubectl.create_ns(settings.test_namespace)
        # when creating ClickHouse, ZK needs to be installed before CH
        if keeper_install_first:
            require_keeper(keeper_type=keeper_type, keeper_manifest=keeper_manifest, force_install=force_keeper_install)

        chi_manifest_data = yaml_manifest.get_manifest_data(get_full_path(chi_file))
        layout = chi_manifest_data["spec"]["configuration"]["clusters"][0]["layout"]
        expected_nodes = 1 * layout["shardsCount"] * layout["replicasCount"]
        check = {
            "apply_templates": [
                chi_template_file,
                "manifests/chit/tpl-persistent-volume-100Mi.yaml"
            ],
            "do_not_delete": 1
        }
        if make_object_count:
            check["object_counts"] = {
                "statefulset": expected_nodes,
                "pod": expected_nodes,
                "service": expected_nodes + 1,
            }
        kubectl.create_and_check(
            manifest=chi_file,
            check=check,
        )
        clickhouse_operator_spec = kubectl.get(
            "pod", name="", ns=settings.operator_namespace, label="-l app=clickhouse-operator"
        )
        chi = kubectl.get("chi", ns=settings.test_namespace, name=chi_name)

        # when re-scaling ClickHouse, Keeper needs to be installed after CH to follow ACM logic
        if not keeper_install_first:
            require_keeper(keeper_type=keeper_type, keeper_manifest=keeper_manifest, force_install=force_keeper_install)

        return clickhouse_operator_spec, chi
def apply_fake_backup(message):
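    """Apply the fake-backup CHI manifest (2 StatefulSets, 2 pods, 3 services) and keep it
    running; `message` is used as the Given step description."""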
    with Given(message):
        kubectl.create_and_check(
            manifest="manifests/chi/test-cluster-for-backups-fake.yaml",
            check={
                "apply_templates": [
                    "manifests/chit/tpl-clickhouse-backups-fake.yaml",
                    "manifests/chit/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete":
                1
            })
def test_ch_002(self):
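    """Row-level security test: create a table, insert one row per team, and verify
    that each user only sees the single row allowed by the row-level policy."""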
    kubectl.create_and_check(
        "manifests/chi/test-ch-002-row-level.yaml", {
            "apply_templates": {"manifests/chit/tpl-clickhouse-21.8.yaml"},
            "do_not_delete": 1,
        })

    chi = "test-ch-002-row-level"
    create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""

    with When("Create test table"):
        clickhouse.query(chi, create_table)

    with And("Insert some data"):
        clickhouse.query(
            chi,
            "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
        )

    with Then(
            "Make another query for different users. It should be restricted to the corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi,
                                   "select user from test",
                                   user=user,
                                   pwd=user)
            assert out == user, error()

    with Then(
            "Make a count() query for different users. It should be restricted to the corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi,
                                   "select count() from test",
                                   user=user,
                                   pwd=user)
            assert out == "1", error()

    kubectl.delete_chi(chi)
def test_ch_001(self):
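    """Insert-quorum test: create replicated tables t1/t2/t3 with materialized views
    t1->t2 and t1->t3, stop fetches on one replica, and verify quorum-insert behaviour
    (timeouts, 'quorum not satisfied' errors, and deduplication of repeated blocks)."""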
    util.require_zookeeper()
    quorum_template = "manifests/chit/tpl-clickhouse-21.8.yaml"
    chit_data = yaml_manifest.get_manifest_data(
        util.get_full_path(quorum_template))

    kubectl.launch(f"delete chit {chit_data['metadata']['name']}",
                   ns=settings.test_namespace,
                   ok_to_fail=True)
    kubectl.create_and_check("manifests/chi/test-ch-001-insert-quorum.yaml", {
        "apply_templates": {quorum_template},
        "pod_count": 2,
        "do_not_delete": 1,
    })

    chi = yaml_manifest.get_chi_name(
        util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml"))
    chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
    util.wait_clickhouse_cluster_ready(chi_data)

    host0 = "chi-test-ch-001-insert-quorum-default-0-0"
    host1 = "chi-test-ch-001-insert-quorum-default-0-1"

    create_table = """
    create table t1 on cluster default (a Int8, d Date default today())
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by d order by a 
    TTL d + interval 5 second
    SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')

    create_mv_table2 = """
    create table t2 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv_table3 = """
    create table t3 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
    create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"

    with Given("Tables t1, t2, t3 and MVs t1->t2, t1-t3 are created"):
        clickhouse.query(chi, create_table)
        clickhouse.query(chi, create_mv_table2)
        clickhouse.query(chi, create_mv_table3)

        clickhouse.query(chi, create_mv2)
        clickhouse.query(chi, create_mv3)

        with When("Add a row to an old partition"):
            clickhouse.query(chi,
                             "insert into t1(a,d) values(6, today()-1)",
                             host=host0)

        with When("Stop fetches for t1 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t1", host=host1)

            with Then("Wait 10 seconds and the data should be dropped by TTL"):
                time.sleep(10)
                out = clickhouse.query(chi,
                                       "select count() from t1 where a=6",
                                       host=host0)
                assert out == "0", error()

        with When("Resume fetches for t1 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t1",
                             host=host1)
            time.sleep(5)

            with Then("Inserts should resume"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(7)",
                                 host=host0)

        clickhouse.query(chi, "insert into t1(a) values(1)")

        with When("Stop fetches for t2 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t2", host=host1)

            with Then("Insert should fail since it can not reach the quorum"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(2)", host=host0)
                assert "Timeout while waiting for quorum" in out, error()

        # kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
        # with Then("Corrupt data part in t2"):
        #    kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")

        with When("Resume fetches for t2 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t2",
                             host=host1)
            i = 0
            while "2" != clickhouse.query(
                    chi,
                    "select active_replicas from system.replicas where database='default' and table='t1'",
                    pod=host0) and i < 10:
                with Then("Not ready, wait 5 seconds"):
                    time.sleep(5)
                    i += 1

            with Then(
                    "Inserts should fail with an error about the quorum not being satisfied"
            ):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(3)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

            with And("Second insert of the same block should pass"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(3)",
                                 host=host0)

            with And("Insert of the new block should fail"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(4)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

            with And(
                    "Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"
            ):
                out = clickhouse.query_with_error(
                    chi,
                    "set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
                    host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

        out = clickhouse.query_with_error(
            chi,
            "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
        )
        note(out)
def test_metrics_exporter_reboot(self):
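    """Verify the metrics-exporter /chi endpoint: it is empty in a clean namespace,
    lists a newly created CHI, still lists it after the exporter container is restarted,
    and becomes empty again once the CHI is deleted."""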
    def check_monitoring_chi(operator_namespace,
                             operator_pod,
                             expect_result,
                             max_retries=10):
        with Then(
                f"metrics-exporter /chi endpoint result should return {expect_result}"
        ):
            for i in range(1, max_retries):
                # hit /metrics to trigger a refresh of monitored instances
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/metrics")
                kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                # check /chi after refreshing monitored instances
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/chi")
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                out = json.loads(out)
                if out == expect_result:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert out == expect_result, error()

    with Given("clickhouse-operator is installed"):
        kubectl.wait_field("pods",
                           util.operator_label,
                           ".status.containerStatuses[*].ready",
                           "true,true",
                           ns=settings.operator_namespace)
        assert kubectl.get_count("pod",
                                 ns='--all-namespaces',
                                 label=util.operator_label) > 0, error()

        out = kubectl.launch("get pods -l app=clickhouse-operator",
                             ns=settings.operator_namespace).splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = settings.operator_namespace
        kubectl.delete_ns(kubectl.namespace, ok_to_fail=True)
        kubectl.create_ns(kubectl.namespace)
        check_monitoring_chi(operator_namespace, operator_pod, [])
        with And("created simple clickhouse installation"):
            manifest = "../../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml"
            kubectl.create_and_check(manifest=manifest,
                                     check={
                                         "object_counts": {
                                             "statefulset": 1,
                                             "pod": 1,
                                             "service": 2,
                                         },
                                         "do_not_delete": True,
                                     })
            expected_chi = [{
                "namespace": "test",
                "name": "simple-01",
                "hostnames": ["chi-simple-01-simple-0-0.test.svc.cluster.local"]
            }]
            check_monitoring_chi(operator_namespace, operator_pod,
                                 expected_chi)
            with When("reboot metrics exporter"):
                kubectl.launch(
                    f"exec -n {operator_namespace} {operator_pod} -c metrics-exporter -- bash -c 'kill 1'"
                )
                time.sleep(15)
                kubectl.wait_field("pods",
                                   util.operator_label,
                                   ".status.containerStatuses[*].ready",
                                   "true,true",
                                   ns=settings.operator_namespace)
                with Then("check metrics exporter still contains chi objects"):
                    check_monitoring_chi(operator_namespace, operator_pod,
                                         expected_chi)
                    kubectl.delete(util.get_full_path(manifest,
                                                      lookup_in_host=False),
                                   timeout=600)
                    check_monitoring_chi(operator_namespace, operator_pod, [])
def test_metrics_exporter_with_multiple_clickhouse_version(self):
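    """Verify the metrics-exporter /metrics endpoint: it is empty in a clean namespace,
    exposes chi_clickhouse_metric_VersionInteger for both hosts of a multi-version CHI,
    and is empty again after the namespace is deleted."""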
    def check_monitoring_metrics(operator_namespace,
                                 operator_pod,
                                 expect_result,
                                 max_retries=10):
        with Then(
                f"metrics-exporter /metrics endpoint result should match with {expect_result}"
        ):
            for i in range(1, max_retries):
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/metrics")
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                all_strings_expected_done = True
                for string, exists in expect_result.items():
                    all_strings_expected_done = (exists == (string in out))
                    if not all_strings_expected_done:
                        break

                if all_strings_expected_done:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert all_strings_expected_done, error()

    with Given("clickhouse-operator pod exists"):
        out = kubectl.launch("get pods -l app=clickhouse-operator",
                             ns='kube-system').splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = "kube-system"

        with Then("check empty /metrics"):
            kubectl.delete_ns(kubectl.namespace, ok_to_fail=True)
            kubectl.create_ns(kubectl.namespace)
            check_monitoring_metrics(
                operator_namespace,
                operator_pod,
                expect_result={
                    'chi_clickhouse_metric_VersionInteger': False,
                })

        with Then("Install multiple clickhouse version"):
            manifest = "manifests/chi/test-017-multi-version.yaml"
            kubectl.create_and_check(manifest=manifest,
                                     check={
                                         "object_counts": {
                                             "statefulset": 2,
                                             "pod": 2,
                                             "service": 3,
                                         },
                                         "do_not_delete": True,
                                     })
            with And("Check not empty /metrics"):
                check_monitoring_metrics(
                    operator_namespace,
                    operator_pod,
                    expect_result={
                        '# HELP chi_clickhouse_metric_VersionInteger': True,
                        '# TYPE chi_clickhouse_metric_VersionInteger gauge': True,
                        'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True,
                        'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True,
                    })

        with Then("check empty /metrics after delete namespace"):
            kubectl.delete_ns(kubectl.namespace)
            check_monitoring_metrics(
                operator_namespace,
                operator_pod,
                expect_result={
                    'chi_clickhouse_metric_VersionInteger': False,
                })