def restart_zookeeper():
    kubectl.launch(
        f"exec -n {kubectl.namespace} zookeeper-0 -- sh -c \"kill 1\"",
        ok_to_fail=True,
    )
    clickhouse.query_with_error(chi_name, "SELECT name, path FROM system.zookeeper WHERE path='/'", host=svc1)
    clickhouse.query_with_error(chi_name, "SELECT name, path FROM system.zookeeper WHERE path='/'", host=svc2)
def check_monitoring_chi(operator_namespace,
                         operator_pod,
                         expect_result,
                         max_retries=10):
    with Then(
            f"metrics-exporter /chi endpoint result should return {expect_result}"
    ):
        for i in range(1, max_retries):
            # hit /metrics first to trigger a refresh of the monitored instances
            url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                 "/metrics")
            kubectl.launch(
                f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                ns=operator_namespace)
            # check /chi after the monitored instances have been refreshed
            url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                 "/chi")
            out = kubectl.launch(
                f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                ns=operator_namespace)
            out = json.loads(out)
            if out == expect_result:
                break
            with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                time.sleep(i * 5)
        assert out == expect_result, error()
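
# A minimal usage sketch of check_monitoring_chi above. Every concrete value here
# (namespace, pod name, expected /chi payload) is hypothetical and only illustrates
# the shape of the arguments; it is not taken from a real run.
check_monitoring_chi(
    operator_namespace="kube-system",                      # hypothetical namespace
    operator_pod="clickhouse-operator-857c69ffc6-q8p7v",   # hypothetical operator pod
    expect_result=[{
        "namespace": "test",
        "name": "test-cluster",
        "hostnames": ["chi-test-cluster-default-0-0.test.svc.cluster.local"],
    }],
)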
Example n. 4
def restart_operator(ns=settings.operator_namespace, timeout=600):
    if settings.operator_install != 'yes':
        return
    pod_name = kubectl.get("pod", name="", ns=ns, label=operator_label)["items"][0]["metadata"]["name"]
    kubectl.launch(f"delete pod {pod_name}", ns=ns, timeout=timeout)
    kubectl.wait_object("pod", name="", ns=ns, label=operator_label)
    pod_name = kubectl.get("pod", name="", ns=ns, label=operator_label)["items"][0]["metadata"]["name"]
    kubectl.wait_pod_status(pod_name, "Running", ns=ns)
def create_success_backup():
    backup_name = backup_prefix + "-" + str(random.randint(1, 4096))
    kubectl.launch(
        f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}",
    )
    wait_backup_command_status(backup_pod,
                               command_name=f'create {backup_name}',
                               expected_status='success')
def test_backup_not_run(self, chi, minio_spec):
    not_run_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi)
    apply_fake_backup("prepare fake backup for time metric")

    with Then(f"wait {not_run_pod} ready"):
        kubectl.wait_field("pod", not_run_pod, ".spec.containers[1].image",
                           "nginx:latest")
        kubectl.wait_field("pod", not_run_pod,
                           ".status.containerStatuses[1].ready", "true")

    with Then(f"setup {not_run_pod} backup create end time"):
        kubectl.launch(
            f'exec {not_run_pod} -c clickhouse-backup -- bash -xc \''
            'echo "# HELP clickhouse_backup_last_create_finish Last backup create finish timestamp" > /usr/share/nginx/html/metrics && '
            'echo "# TYPE clickhouse_backup_last_create_finish gauge" >> /usr/share/nginx/html/metrics && '
            f'echo "clickhouse_backup_last_create_finish {int((datetime.datetime.now() - datetime.timedelta(days=2)).timestamp())}" >> /usr/share/nginx/html/metrics '
            '\'')

        fired = alerts.wait_alert_state(
            "ClickhouseBackupDoesntRunTooLong",
            "firing",
            expected_state=True,
            sleep_time=settings.prometheus_scrape_interval,
            labels={"pod_name": not_run_pod},
            time_range='60s')
        assert fired, error(
            "can't get ClickhouseBackupDoesntRunTooLong alert in firing state")

    apply_normal_backup()

    backup_name = prepare_table_for_backup(not_run_pod, chi)
    wait_backup_pod_ready_and_curl_installed(not_run_pod)

    with When('Backup is success'):
        exec_on_backup_container(
            not_run_pod,
            f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"'
        )
        wait_backup_command_status(not_run_pod,
                                   f'create {backup_name}',
                                   expected_status='success')

        exec_on_backup_container(
            not_run_pod,
            f'curl -X POST -sL "http://127.0.0.1:7171/backup/upload/{backup_name}"'
        )
        wait_backup_command_status(not_run_pod,
                                   f'upload {backup_name}',
                                   expected_status='success')

    with Then("check ClickhouseBackupDoesntRunTooLong gone away"):
        resolved = alerts.wait_alert_state("ClickhouseBackupDoesntRunTooLong",
                                           "firing",
                                           expected_state=False,
                                           labels={"pod_name": not_run_pod})
        assert resolved, error(
            "can't check ClickhouseBackupDoesntRunTooLong alert is gone away")
def restart_clickhouse_and_insert_to_replicated_table():
    with When(f"stop replica fetches on {stop_replica_svc}"):
        sql = "SYSTEM STOP FETCHES default.test_repl"
        kubectl.launch(
            f"exec -n {kubectl.namespace} {stop_replica_pod} -c clickhouse-pod -- clickhouse-client -q \"{sql}\"",
            ok_to_fail=True, timeout=600,
        )
        sql = "INSERT INTO default.test_repl SELECT now(), number FROM numbers(100000)"
        kubectl.launch(
            f"exec -n {kubectl.namespace} {insert_pod} -c clickhouse-pod -- clickhouse-client -q \"{sql}\"",
        )
def run_queries_with_priority():
    sql = ""
    for i in range(50):
        # the trailing ':' is the delimiter for xargs -d ':' below, so each
        # "SET priority=...;SELECT ..." pair becomes one clickhouse-client invocation
        sql += f"SET priority={i % 20};SELECT uniq(number) FROM numbers(20000000):"
    # fan the queries out across up to 20 parallel clickhouse-client processes
    cmd = f"echo \\\"{sql} SELECT 1\\\" | xargs -i'{{}}' --no-run-if-empty -d ':' -P 20 clickhouse-client --time -m -n -q \\\"{{}}\\\""
    kubectl.launch(f"exec {priority_pod} -- bash -c \"{cmd}\"", timeout=120)
    clickhouse.query(
        chi["metadata"]["name"],
        "SELECT event_time, CurrentMetric_QueryPreempted FROM system.metric_log WHERE CurrentMetric_QueryPreempted > 0",
        host=priority_svc,
    )
    def create_fail_backup():
        backup_name = backup_prefix + "-" + str(random.randint(1, 4096))
        backup_dir = f"/var/lib/clickhouse/backup/{backup_name}/shadow/default/test_backup"
        # pre-create the shadow directory as read-only (mode 0400) so the subsequent backup create call fails
        kubectl.launch(
            f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- bash -c 'mkdir -v -m 0400 -p {backup_dir}'",
        )

        kubectl.launch(
            f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}",
        )
        wait_backup_command_status(backup_pod,
                                   command_name=f'create {backup_name}',
                                   expected_status='error')
def test_distributed_files_to_insert(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    delayed_pod, delayed_svc, restarted_pod, restarted_svc = alerts.random_pod_choice_for_callbacks(chi)
    clickhouse.create_distributed_table_on_cluster(chi)

    insert_sql = 'INSERT INTO default.test_distr(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1000'
    clickhouse.query(
        chi["metadata"]["name"], 'SYSTEM STOP DISTRIBUTED SENDS default.test_distr',
        pod=delayed_pod, ns=kubectl.namespace
    )

    files_to_insert_from_metrics = 0
    files_to_insert_from_disk = 0
    tries = 0
    # we need more than 50 delayed files to trigger the alert
    while files_to_insert_from_disk <= 55 and files_to_insert_from_metrics <= 55 and tries < 500:
        kubectl.launch(
            f"exec -n {kubectl.namespace} {restarted_pod} -c clickhouse-pod -- kill 1",
            ok_to_fail=True,
        )
        clickhouse.query(chi["metadata"]["name"], insert_sql, pod=delayed_pod, host=delayed_pod, ns=kubectl.namespace)
        files_to_insert_from_metrics = clickhouse.query(
            chi["metadata"]["name"], "SELECT value FROM system.metrics WHERE metric='DistributedFilesToInsert'",
            pod=delayed_pod, ns=kubectl.namespace
        )
        files_to_insert_from_metrics = int(files_to_insert_from_metrics)

        files_to_insert_from_disk = int(kubectl.launch(
            f"exec -n {kubectl.namespace} {delayed_pod} -c clickhouse-pod -- bash -c 'ls -la /var/lib/clickhouse/data/default/test_distr/*/*.bin 2>/dev/null | wc -l'",
            ok_to_fail=False,
        ))
        tries += 1

    with When("reboot clickhouse-server pod"):
        fired = alerts.wait_alert_state(
            "ClickHouseDistributedFilesToInsertHigh", "firing", True,
            labels={"hostname": delayed_svc, "chi": chi["metadata"]["name"]}
        )
        assert fired, error("can't get ClickHouseDistributedFilesToInsertHigh alert in firing state")

    kubectl.wait_pod_status(restarted_pod, "Running", ns=kubectl.namespace)

    clickhouse.query(
        chi["metadata"]["name"], 'SYSTEM START DISTRIBUTED SENDS default.test_distr',
        pod=delayed_pod, ns=kubectl.namespace
    )

    with Then("check ClickHouseDistributedFilesToInsertHigh gone away"):
        resolved = alerts.wait_alert_state("ClickHouseDistributedFilesToInsertHigh", "firing", False, labels={"hostname": delayed_svc})
        assert resolved, error("can't check ClickHouseDistributedFilesToInsertHigh alert is gone away")

    clickhouse.drop_distributed_table_on_cluster(chi)
    def reboot_clickhouse_and_distributed_execution():
        # we need 70 delayed files to trigger the alert
        insert_sql = 'INSERT INTO default.test_distr(event_time, test) SELECT now(), number FROM system.numbers LIMIT 10000'
        select_sql = 'SELECT count() FROM default.test_distr'
        with Then("reboot clickhouse-server pod"):
            kubectl.launch(
                f"exec -n {kubectl.namespace} {restarted_pod} -c clickhouse-pod -- kill 1",
                ok_to_fail=True,
            )
            with Then("Insert to distributed table"):
                clickhouse.query(chi["metadata"]["name"], insert_sql, host=delayed_pod, ns=kubectl.namespace)

            with Then("Select from distributed table"):
                clickhouse.query_with_error(chi["metadata"]["name"], select_sql, host=delayed_pod,
                                            ns=kubectl.namespace)
Example n. 12
def check_alert_state(alert_name,
                      prometheus_pod,
                      alert_state="firing",
                      labels=None,
                      time_range="10s"):
    with Then(
            f"check {alert_name} for state {alert_state} and {labels} labels in {time_range}"
    ):
        cmd = f"exec -n {settings.prometheus_namespace} {prometheus_pod} -c prometheus -- "
        cmd += "wget -qO- 'http://127.0.0.1:9090/api/v1/query?query=ALERTS{"
        if labels is None:
            labels = {}
        if not isinstance(labels, dict):
            fail(f"Invalid labels={labels}")
        labels.update({"alertname": alert_name, "alertstate": alert_state})
        cmd += ",".join(
            [f"{name}=\"{value}\"" for name, value in labels.items()])
        cmd += f"}}[{time_range}]' 2>/dev/null"
        out = kubectl.launch(cmd)
        out = json.loads(out)
        if not ("status" in out and out["status"] == "success"):
            fail("wrong response from prometheus query API")
        if len(out["data"]["result"]) == 0:
            with Then("not present, empty result"):
                return False
        result_labels = out["data"]["result"][0]["metric"].items()
        exists = all(item in result_labels for item in labels.items())
        with Then("got result and contains labels"
                  if exists else "got result, but doesn't contain labels"):
            return exists
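
# A minimal usage sketch of check_alert_state above; the pod and hostname values are
# hypothetical. With these arguments the function queries Prometheus for roughly
#   ALERTS{hostname="chi-test-cluster-default-0-0",alertname="ClickHouseDNSErrors",alertstate="firing"}[10s]
# and returns True only when the API call succeeds and the first result carries all
# of the requested labels.
firing = check_alert_state(
    "ClickHouseDNSErrors",
    "prometheus-prometheus-0",                             # hypothetical prometheus pod
    alert_state="firing",
    labels={"hostname": "chi-test-cluster-default-0-0"},   # hypothetical ClickHouse host
)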
def test_too_many_connections(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    too_many_connection_pod, too_many_connection_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi)
    cmd = "export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get install -y netcat mysql-client"
    kubectl.launch(
        f"exec -n {kubectl.namespace} {too_many_connection_pod} -c clickhouse-pod -- bash -c  \"{cmd}\"",
        timeout=120,
    )

    def make_too_many_connection():
        long_cmd = ""
        for _ in range(120):
            port = random.choice(["8123", "3306", "3306", "3306", "9000"])
            if port == "8123":
                # the HTTPConnection metric only increases after the HTTP request is fully parsed, so we can't add a pause between CONNECT and running the QUERY
                # long_cmd += f"nc -vv 127.0.0.1 {port} <( printf \"POST / HTTP/1.1\\r\\nHost: 127.0.0.1:8123\\r\\nContent-Length: 34\\r\\n\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\");"
                long_cmd += 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),number,now() FROM numbers(30)";'
            elif port == "9000":
                long_cmd += 'clickhouse-client --send_logs_level information --idle_connection_timeout 70 --receive_timeout 70 -q "SELECT sleepEachRow(1),number,now() FROM numbers(30)";'
            # elif port == "3306":
            #     long_cmd += 'mysql -u default -h 127.0.0.1 -e "SELECT sleepEachRow(1),number, now() FROM numbers(30)";'
            else:
                long_cmd += f"printf \"1\\n1\" | nc -q 5 -i 30 -vv 127.0.0.1 {port};"

        nc_cmd = f"echo '{long_cmd} whereis nc; exit 0' | xargs --verbose -i'{{}}' --no-run-if-empty -d ';' -P 120 bash -c '{{}}' 1>/dev/null"
        with open("/tmp/nc_cmd.sh", "w") as f:
            f.write(nc_cmd)

        kubectl.launch(
            f"cp /tmp/nc_cmd.sh {too_many_connection_pod}:/tmp/nc_cmd.sh -c clickhouse-pod"
        )

        kubectl.launch(
            f"exec -n {kubectl.namespace} {too_many_connection_pod} -c clickhouse-pod -- bash /tmp/nc_cmd.sh",
            timeout=600,
        )

    with Then("check ClickHouseTooManyConnections firing"):
        fired = alerts.wait_alert_state(
            "ClickHouseTooManyConnections", "firing", True, labels={"hostname": too_many_connection_svc},
            time_range='90s', callback=make_too_many_connection
        )
        assert fired, error("can't get ClickHouseTooManyConnections alert in firing state")

    with Then("check ClickHouseTooManyConnections gone away"):
        resolved = alerts.wait_alert_state("ClickHouseTooManyConnections", "firing", False, labels={"hostname": too_many_connection_svc})
        assert resolved, error("can't check ClickHouseTooManyConnections alert is gone away")
def exec_on_backup_container(backup_pod,
                             cmd,
                             ns=settings.test_namespace,
                             ok_to_fail=False,
                             timeout=60,
                             container='clickhouse-backup'):
    return kubectl.launch(f'exec -n {ns} {backup_pod} -c {container} -- {cmd}',
                          ok_to_fail=ok_to_fail,
                          timeout=timeout)
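
# A minimal usage sketch of exec_on_backup_container above. The pod name is
# hypothetical, and the /backup/list endpoint is assumed to exist in the
# clickhouse-backup REST API alongside the /backup/create and /backup/upload
# endpoints used elsewhere in these tests.
backups = exec_on_backup_container(
    "chi-test-cluster-default-0-0-0",                      # hypothetical backup pod
    'curl -sL "http://127.0.0.1:7171/backup/list"',
)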
def test_too_much_running_queries(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    _, _, too_many_queries_pod, too_many_queries_svc = alerts.random_pod_choice_for_callbacks(chi)
    cmd = "export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get install -y mysql-client"
    kubectl.launch(
        f"exec -n {kubectl.namespace} {too_many_queries_pod} -c clickhouse-pod -- bash -c  \"{cmd}\"",
        ok_to_fail=True,
    )

    def make_too_many_queries():
        long_cmd = ""
        for _ in range(90):
            port = random.choice(["8123", "3306", "9000"])
            if port == "9000":
                long_cmd += 'clickhouse-client -q "SELECT sleepEachRow(1),now() FROM numbers(60)";'
            if port == "3306":
                long_cmd += 'mysql -h 127.0.0.1 -P 3306 -u default -e "SELECT sleepEachRow(1),now() FROM numbers(60)";'
            if port == "8123":
                long_cmd += 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),now() FROM numbers(60)";'

        long_cmd = f"echo '{long_cmd}' | xargs --verbose -i'{{}}' --no-run-if-empty -d ';' -P 100 bash -c '{{}}' 1>/dev/null"
        with open("/tmp/long_cmd.sh", "w") as f:
            f.write(long_cmd)

        kubectl.launch(
            f"cp /tmp/long_cmd.sh {too_many_queries_pod}:/tmp/long_cmd.sh -c clickhouse-pod"
        )
        kubectl.launch(
            f"exec -n {kubectl.namespace} {too_many_queries_pod} -c clickhouse-pod -- bash /tmp/long_cmd.sh",
            timeout=90,
        )

    with Then("check ClickHouseTooManyRunningQueries firing"):
        fired = alerts.wait_alert_state(
            "ClickHouseTooManyRunningQueries", "firing", True, labels={"hostname": too_many_queries_svc},
            callback=make_too_many_queries, time_range="30s"
        )
        assert fired, error("can't get ClickHouseTooManyRunningQueries alert in firing state")

    with Then("check ClickHouseTooManyRunningQueries gone away"):
        resolved = alerts.wait_alert_state("ClickHouseTooManyRunningQueries", "firing", False, labels={"hostname": too_many_queries_svc},
                                           sleep_time=settings.prometheus_scrape_interval)
        assert resolved, error("can't check ClickHouseTooManyRunningQueries alert is gone away")
def query(chi_name,
          sql,
          with_error=False,
          host="127.0.0.1",
          port="9000",
          user="",
          pwd="",
          ns=settings.test_namespace,
          timeout=60,
          advanced_params="",
          pod="",
          container="clickhouse-pod"):
    pod_names = kubectl.get_pod_names(chi_name, ns)
    pod_name = pod_names[0]
    for p in pod_names:
        if host in p or p == pod:
            pod_name = p
            break

    pwd_str = "" if pwd == "" else f"--password={pwd}"
    user_str = "" if user == "" else f"--user={user}"

    if with_error:
        return kubectl.launch(
            f"exec {pod_name} -n {ns} -c {container}"
            f" --"
            f" clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}"
            f" --query=\"{sql}\""
            f" 2>&1",
            timeout=timeout,
            ns=ns,
            ok_to_fail=True,
        )
    else:
        return kubectl.launch(
            f"exec {pod_name} -n {ns} -c {container}"
            f" -- "
            f"clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}"
            f" --query=\"{sql}\"",
            timeout=timeout,
            ns=ns,
        )
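
# A minimal usage sketch of query() above; the CHI name and host are hypothetical.
# With with_error=True the helper runs clickhouse-client with ok_to_fail=True and
# returns the error output instead of raising on failure.
out = query(
    "test-cluster",                                        # hypothetical CHI name
    "SELECT version()",
    host="chi-test-cluster-default-0-0",                   # hypothetical replica service
    with_error=True,
)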
Example n. 17
def wait_zookeeper_ready(svc_name='zookeeper', pod_count=3, retries=10):
    for i in range(retries):
        ready_pods = kubectl.launch(
            f"get pods | grep {svc_name} | grep Running | grep '1/1' | wc -l")
        ready_endpoints = "0"
        if ready_pods == str(pod_count):
            ready_endpoints = kubectl.launch(
                f"get endpoints {svc_name} -o json | jq '.subsets[].addresses[].ip' | wc -l"
            )
            if ready_endpoints == str(pod_count):
                break
        else:
            with Then(
                    f"Zookeeper not ready yet: ready_endpoints={ready_endpoints} ready_pods={ready_pods}, expected pod_count={pod_count}. "
                    f"Wait for {i*3} seconds"):
                time.sleep(i * 3)
        if i == retries - 1:
            fail(
                f"Zookeeper failed, ready_endpoints={ready_endpoints} ready_pods={ready_pods}, expected pod_count={pod_count}"
            )
def test_read_only_replica(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    read_only_pod, read_only_svc, other_pod, other_svc = alerts.random_pod_choice_for_callbacks(chi)
    chi_name = chi["metadata"]["name"]
    clickhouse.create_table_on_cluster(
        chi,
        'all-replicated', 'default.test_repl',
        '(event_time DateTime, test UInt64) ' +
        'ENGINE ReplicatedMergeTree(\'/clickhouse/tables/{installation}-{shard}/test_repl\', \'{replica}\') ORDER BY tuple()'
    )

    def restart_zookeeper():
        kubectl.launch(
            f"exec -n {kubectl.namespace} zookeeper-0 -- sh -c \"kill 1\"",
            ok_to_fail=True,
        )
        clickhouse.query_with_error(chi_name, "INSERT INTO default.test_repl VALUES(now(),rand())", host=read_only_svc)

    with Then("check ClickHouseReadonlyReplica firing"):
        fired = alerts.wait_alert_state("ClickHouseReadonlyReplica", "firing", True, labels={"hostname": read_only_svc},
                                        time_range='30s', sleep_time=settings.prometheus_scrape_interval, callback=restart_zookeeper)
        assert fired, error("can't get ClickHouseReadonlyReplica alert in firing state")
    with Then("check ClickHouseReadonlyReplica gone away"):
        resolved = alerts.wait_alert_state("ClickHouseReadonlyReplica", "firing", False, labels={"hostname": read_only_svc})
        assert resolved, error("can't check ClickHouseReadonlyReplica alert is gone away")

    kubectl.wait_pod_status("zookeeper-0", "Running", ns=kubectl.namespace)
    kubectl.wait_jsonpath("pod", "zookeeper-0", "{.status.containerStatuses[0].ready}", "true",
                          ns=kubectl.namespace)

    for i in range(11):
        zookeeper_status = kubectl.launch(
            f"exec -n {kubectl.namespace} zookeeper-0 -- sh -c \"echo ruok | nc 127.0.0.1 2181\"", ok_to_fail=True
        )
        if "imok" in zookeeper_status:
            break
        elif i == 10:
            fail(f"invalid zookeeper status after {i} retries")
        with Then("zookeeper is not ready, wait 2 seconds"):
            time.sleep(2)

    clickhouse.query_with_error(
        chi_name, "SYSTEM RESTART REPLICAS; SYSTEM SYNC REPLICA default.test_repl",
        host=read_only_svc, timeout=240
    )
    clickhouse.query_with_error(
        chi_name, "SYSTEM RESTART REPLICAS; SYSTEM SYNC REPLICA default.test_repl",
        host=other_svc, timeout=240
    )

    clickhouse.drop_table_on_cluster(chi, 'all-replicated', 'default.test_repl')
Example n. 20
def wait_keeper_ready(keeper_type='zookeeper', pod_count=3, retries=10):
    svc_name = 'zookeeper-client' if keeper_type == "zookeeper-operator" else 'zookeeper'
    expected_containers = "2/2" if keeper_type == "clickhouse-keeper" else "1/1"
    expected_pod_prefix = "clickhouse-keeper" if keeper_type == "clickhouse-keeper" else "zookeeper"
    for i in range(retries):
        ready_pods = kubectl.launch(
            f"get pods | grep {expected_pod_prefix} | grep Running | grep '{expected_containers}' | wc -l"
        )
        ready_endpoints = "0"
        if ready_pods == str(pod_count):
            ready_endpoints = kubectl.launch(
                f"get endpoints {svc_name} -o json | jq '.subsets[].addresses[].ip' | wc -l"
            )
            if ready_endpoints == str(pod_count):
                break
        else:
            with Then(
                    f"Zookeeper not ready yet: ready_endpoints={ready_endpoints} ready_pods={ready_pods}, expected pod_count={pod_count}. "
                    f"Wait for {i * 3} seconds"):
                time.sleep(i * 3)
        if i == retries - 1:
            fail(
                f"Zookeeper failed, ready_endpoints={ready_endpoints} ready_pods={ready_pods}, expected pod_count={pod_count}"
            )
Example n. 21
def set_operator_version(version, ns=settings.operator_namespace, timeout=600):
    if settings.operator_install != 'yes':
        return
    operator_image = f"{settings.operator_docker_repo}:{version}"
    metrics_exporter_image = f"{settings.metrics_exporter_docker_repo}:{version}"

    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator clickhouse-operator={operator_image}", ns=ns)
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator metrics-exporter={metrics_exporter_image}", ns=ns)
    kubectl.launch("rollout status deployment.v1.apps/clickhouse-operator", ns=ns, timeout=timeout)
    if kubectl.get_count("pod", ns=ns, label=operator_label) == 0:
        fail("invalid clickhouse-operator pod count")
Example n. 22
    def check_zk_root_znode(chi, pod_count, retry_count=5):
        for pod_num in range(pod_count):
            out = ""
            expected_out = ""
            for i in range(retry_count):
                if keeper_type == "zookeeper-operator":
                    expected_out = "[clickhouse, zookeeper, zookeeper-operator]"
                    keeper_cmd = './bin/zkCli.sh ls /'
                    pod_prefix = "zookeeper"
                elif keeper_type == "zookeeper":
                    expected_out = "[clickhouse, zookeeper]"
                    keeper_cmd = './bin/zkCli.sh ls /'
                    pod_prefix = "zookeeper"
                else:
                    expected_out = "clickhouse"
                    keeper_cmd = "if [[ ! $(command -v zookeepercli) ]]; then "
                    keeper_cmd += "wget -q -O /tmp/zookeepercli.deb https://github.com/outbrain/zookeepercli/releases/download/v1.0.12/zookeepercli_1.0.12_amd64.deb; "
                    keeper_cmd += "dpkg -i /tmp/zookeepercli.deb; "
                    keeper_cmd += "fi; "
                    keeper_cmd += "zookeepercli -servers 127.0.0.1:2181 -c ls /"
                    pod_prefix = "clickhouse-keeper"

                out = kubectl.launch(
                    f"exec {pod_prefix}-{pod_num} -- bash -ce '{keeper_cmd}'",
                    ns=settings.test_namespace,
                    ok_to_fail=True)
                if expected_out in out:
                    break
                else:
                    with Then(
                            f"{keeper_type} ROOT NODE not ready, wait {(i + 1) * 3} sec"
                    ):
                        time.sleep((i + 1) * 3)
            assert expected_out in out, f"Unexpected {keeper_type} `ls /` output"

        out = clickhouse.query(
            chi["metadata"]["name"],
            "SELECT count() FROM system.zookeeper WHERE path='/'")
        expected_out = {
            "zookeeper": "2",
            "zookeeper-operator": "3",
            "clickhouse-keeper": "1",
        }
        assert expected_out[keeper_type] == out.strip(
            " \t\r\n"
        ), f"Unexpected `SELECT count() FROM system.zookeeper WHERE path='/'` output {out}"
def test_clickhouse_dns_errors(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    random_idx = random.randint(0, 1)
    clickhouse_pod = chi["status"]["pods"][random_idx]
    clickhouse_svc = chi["status"]["fqdns"][random_idx]

    old_dns = kubectl.launch(
        f"exec -n {kubectl.namespace} {clickhouse_pod} -c clickhouse-pod -- cat /etc/resolv.conf",
        ok_to_fail=False,
    )
    new_dns = re.sub(r'^nameserver (.+)', 'nameserver 1.1.1.1', old_dns, flags=re.MULTILINE)

    def rewrite_dns_on_clickhouse_server(write_new=True):
        dns = new_dns if write_new else old_dns
        kubectl.launch(
            f"exec -n {kubectl.namespace} {clickhouse_pod} -c clickhouse-pod -- bash -c \"printf \\\"{dns}\\\" > /etc/resolv.conf\"",
            ok_to_fail=False,
        )
        kubectl.launch(
            f"exec -n {kubectl.namespace} {clickhouse_pod} -c clickhouse-pod -- clickhouse-client --echo -mn "
            f"-q \"SYSTEM DROP DNS CACHE; SELECT count() FROM cluster('all-sharded',system.metrics)\"",
            ok_to_fail=True,
        )
        kubectl.launch(
            f"exec -n {kubectl.namespace} {clickhouse_pod} -c clickhouse-pod -- clickhouse-client --echo -mn "
            f"-q \"SELECT sum(ProfileEvent_DNSError) FROM system.metric_log;SELECT * FROM system.events WHERE event='DNSError' FORMAT Vertical; SELECT * FROM system.errors FORMAT Vertical\"",
            ok_to_fail=False,
        )

    with When("rewrite /etc/resolv.conf in clickhouse-server pod"):
        fired = alerts.wait_alert_state(
            "ClickHouseDNSErrors", "firing", True, labels={"hostname": clickhouse_svc},
            time_range='20s', callback=rewrite_dns_on_clickhouse_server
        )
        assert fired, error("can't get ClickHouseDNSErrors alert in firing state")

    with Then("check ClickHouseDNSErrors gone away"):
        rewrite_dns_on_clickhouse_server(write_new=False)
        resolved = alerts.wait_alert_state("ClickHouseDNSErrors", "firing", False, labels={"hostname": clickhouse_svc})
        assert resolved, error("can't check ClickHouseDNSErrors alert is gone away")
Example n. 25
    def check_zk_root_znode(chi, pod_count, zk_retry=5):
        for pod_num in range(pod_count):
            out = ""
            for i in range(zk_retry):
                out = kubectl.launch(
                    f"exec zookeeper-{pod_num} -- bash -ce './bin/zkCli.sh ls /'",
                    ns=settings.test_namespace,
                    ok_to_fail=True)
                if "[clickhouse, zookeeper]" in out:
                    break
                else:
                    with Then(
                            f"Zookeeper ROOT NODE not ready, wait { (i+1)*3} sec"
                    ):
                        time.sleep((i + 1) * 3)
            assert "[clickhouse, zookeeper]" in out, "Unexpected `zkCli.sh ls /` output"

        out = clickhouse.query(
            chi["metadata"]["name"],
            "SELECT count() FROM system.zookeeper WHERE path='/'")
        assert "2" == out.strip(
            " \t\r\n"
        ), f"Unexpected `SELECT count() FROM system.zookeeper WHERE path='/'` output {out}"
    def check_monitoring_metrics(operator_namespace,
                                 operator_pod,
                                 expect_result,
                                 max_retries=10):
        with Then(
                f"metrics-exporter /metrics endpoint result should match with {expect_result}"
        ):
            for i in range(1, max_retries):
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/metrics")
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                all_strings_expected_done = True
                for string, exists in expect_result.items():
                    all_strings_expected_done = (exists == (string in out))
                    if not all_strings_expected_done:
                        break

                if all_strings_expected_done:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert all_strings_expected_done, error()
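
    # A minimal usage sketch of check_monitoring_metrics above; the namespace, pod
    # name, and metric strings are hypothetical. expect_result maps substrings of the
    # /metrics body to whether each one is expected to be present (True) or absent (False).
    check_monitoring_metrics(
        operator_namespace="kube-system",                      # hypothetical namespace
        operator_pod="clickhouse-operator-857c69ffc6-q8p7v",   # hypothetical operator pod
        expect_result={
            "chi_clickhouse_metric_VersionInteger": True,      # hypothetical metric name
            "chi_clickhouse_metric_fetch_errors": False,       # hypothetical metric name
        },
    )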
Example n. 27
def test_ch_001(self):
    util.require_zookeeper()
    quorum_template = "manifests/chit/tpl-clickhouse-21.8.yaml"
    chit_data = yaml_manifest.get_manifest_data(
        util.get_full_path(quorum_template))

    kubectl.launch(f"delete chit {chit_data['metadata']['name']}",
                   ns=settings.test_namespace,
                   ok_to_fail=True)
    kubectl.create_and_check("manifests/chi/test-ch-001-insert-quorum.yaml", {
        "apply_templates": {quorum_template},
        "pod_count": 2,
        "do_not_delete": 1,
    })

    chi = yaml_manifest.get_chi_name(
        util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml"))
    chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
    util.wait_clickhouse_cluster_ready(chi_data)

    host0 = "chi-test-ch-001-insert-quorum-default-0-0"
    host1 = "chi-test-ch-001-insert-quorum-default-0-1"

    create_table = """
    create table t1 on cluster default (a Int8, d Date default today())
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by d order by a 
    TTL d + interval 5 second
    SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')

    create_mv_table2 = """
    create table t2 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv_table3 = """
    create table t3 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
    create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"

    with Given("Tables t1, t2, t3 and MVs t1->t2, t1->t3 are created"):
        clickhouse.query(chi, create_table)
        clickhouse.query(chi, create_mv_table2)
        clickhouse.query(chi, create_mv_table3)

        clickhouse.query(chi, create_mv2)
        clickhouse.query(chi, create_mv3)

        with When("Add a row to an old partition"):
            clickhouse.query(chi,
                             "insert into t1(a,d) values(6, today()-1)",
                             host=host0)

        with When("Stop fetches for t1 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t1", host=host1)

            with Then("Wait 10 seconds and the data should be dropped by TTL"):
                time.sleep(10)
                out = clickhouse.query(chi,
                                       "select count() from t1 where a=6",
                                       host=host0)
                assert out == "0", error()

        with When("Resume fetches for t1 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t1",
                             host=host1)
            time.sleep(5)

            with Then("Inserts should resume"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(7)",
                                 host=host0)

        clickhouse.query(chi, "insert into t1(a) values(1)")

        with When("Stop fetches for t2 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t2", host=host1)

            with Then("Insert should fail since it can not reach the quorum"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(2)", host=host0)
                assert "Timeout while waiting for quorum" in out, error()

        # kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
        # with Then("Corrupt data part in t2"):
        #    kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")

        with When("Resume fetches for t2 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t2",
                             host=host1)
            i = 0
            while "2" != clickhouse.query(
                    chi,
                    "select active_replicas from system.replicas where database='default' and table='t1'",
                    pod=host0) and i < 10:
                with Then("Not ready, wait 5 seconds"):
                    time.sleep(5)
                    i += 1

            with Then(
                    "Inserts should fail with an error regarding not satisfied quorum"
            ):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(3)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

            with And("Second insert of the same block should pass"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(3)",
                                 host=host0)

            with And("Insert of the new block should fail"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(4)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

            with And(
                    "Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"
            ):
                out = clickhouse.query_with_error(
                    chi,
                    "set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
                    host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out, error(
                )

        out = clickhouse.query_with_error(
            chi,
            "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
        )
        note(out)
Example n. 28
def set_metrics_exporter_version(version, ns=settings.operator_namespace):
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator metrics-exporter=altinity/metrics-exporter:{version}", ns=ns)
    kubectl.launch("rollout status deployment.v1.apps/clickhouse-operator", ns=ns)
def restart_zookeeper():
    kubectl.launch(
        f"exec -n {kubectl.namespace} {zookeeper_pod} -- sh -c \"kill 1\"",
        ok_to_fail=True,
    )

def reboot_clickhouse_server():
    kubectl.launch(
        f"exec -n {kubectl.namespace} {clickhouse_pod} -c clickhouse-pod -- kill 1",
        ok_to_fail=True,
    )