Example #1
def test_operator_restart(config, version=settings.operator_version):
    with Given(f"clickhouse-operator {version}"):
        set_operator_version(version)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)
        cluster = chi

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                       ".status.startTime")

        with When("Restart operator"):
            restart_operator()
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed")
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2,
            })
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
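Example #1 and Example #2 call set_operator_version() and restart_operator(), which are defined elsewhere in the test suite. A minimal sketch of what such helpers could look like, assuming the operator runs as a "clickhouse-operator" Deployment in settings.operator_namespace; the settings.operator_docker_repo image-repository setting is a hypothetical name, and the timeout forwarding assumes kubectl.launch() accepts a timeout argument:

import kubectl
import settings


def set_operator_version(version, ns=settings.operator_namespace, timeout=60):
    # Assumption: the operator image is settings.operator_docker_repo (hypothetical
    # setting) tagged with the requested version.
    image = f"{settings.operator_docker_repo}:{version}"
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator clickhouse-operator={image}", ns=ns)
    kubectl.launch("rollout status deployment.v1.apps/clickhouse-operator", ns=ns, timeout=timeout)


def restart_operator(ns=settings.operator_namespace):
    # Deleting the operator pod forces the Deployment to re-create it,
    # which is exactly what test_operator_restart exercises.
    spec = kubectl.get("pod", name="", ns=ns, label="-l app=clickhouse-operator")
    pod = spec["items"][0]["metadata"]["name"]  # assumes a kubectl-style List object
    kubectl.launch(f"delete pod {pod}", ns=ns)
    # The operator pod runs two containers (operator + metrics-exporter),
    # hence the two ready flags, as in the examples below.
    kubectl.wait_field("pods", "-l app=clickhouse-operator",
                       ".status.containerStatuses[*].ready", "true,true", ns=ns)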
Example #2
def test_operator_upgrade(config,
                          version_from,
                          version_to=settings.operator_version):
    with Given(f"clickhouse-operator {version_from}"):
        set_operator_version(version_from)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                       ".status.startTime")

        with When(f"upgrade operator to {version_to}"):
            set_operator_version(version_to, timeout=120)
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed", retries=6)
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2
            })
            new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
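manifest.get_chi_name() resolves the installation name from a CHI manifest. Assuming it simply reads metadata.name from the YAML file, a sketch could look like this:

import yaml


def get_chi_name(path):
    # The tests derive pod and service names from the installation name,
    # which is metadata.name of the ClickHouseInstallation manifest.
    with open(path) as f:
        return yaml.safe_load(f)["metadata"]["name"]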
Example #3
def test_017():
    kubectl.create_and_check(config="configs/test-017-multi-version.yaml",
                             check={
                                 "pod_count": 4,
                                 "do_not_delete": 1,
                             })
    chi = "test-017-multi-version"
    queries = [
        "CREATE TABLE test_max (epoch Int32, offset SimpleAggregateFunction(max, Int64)) ENGINE = AggregatingMergeTree() ORDER BY epoch",
        "insert into test_max select 0, 3650487030+number from numbers(5) settings max_block_size=1",
        "insert into test_max select 0, 5898217176+number from numbers(5)",
        "insert into test_max select 0, 5898217176+number from numbers(10) settings max_block_size=1",
        "OPTIMIZE TABLE test_max FINAL"
    ]

    for q in queries:
        print(q)
    test_query = "select min(offset), max(offset) from test_max"
    print(test_query)

    for shard in range(4):
        host = f"chi-{chi}-default-{shard}-0"
        for q in queries:
            clickhouse.query(chi, host=host, sql=q)
        out = clickhouse.query(chi, host=host, sql=test_query)
        ver = clickhouse.query(chi, host=host, sql="select version()")

        print(f"version: {ver}, result: {out}")

    kubectl.delete_chi(chi)
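clickhouse.query() is used throughout these examples to run SQL against the installation. A rough sketch of such a helper, assuming queries are executed by kubectl exec-ing clickhouse-client inside one of the CHI pods; the pod-name pattern and the namespace default are assumptions:

import kubectl
import settings


def query(chi_name, sql, host="127.0.0.1", port="9000", user="", pwd="", ns=settings.test_namespace):
    # Assumption: exec into the first pod of the installation and let
    # clickhouse-client connect to `host`, which may be another replica.
    pod = f"chi-{chi_name}-default-0-0-0"  # hypothetical default pod name
    user_opt = f"--user={user}" if user else ""
    pwd_opt = f"--password={pwd}" if pwd else ""
    return kubectl.launch(
        f'exec {pod} -- clickhouse-client -h {host} --port={port} {user_opt} {pwd_opt} -q "{sql}"',
        ns=ns,
    )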
Example #4
def install_clickhouse_and_zookeeper(chi_file, chi_template_file, chi_name):
    with Given("install zookeeper+clickhouse"):
        kubectl.delete_ns(settings.test_namespace, ok_to_fail=True, timeout=600)
        kubectl.create_ns(settings.test_namespace)
        util.require_zookeeper()
        kubectl.create_and_check(
            config=chi_file,
            check={
                "apply_templates": [
                    chi_template_file,
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete": 1
            }
        )
        clickhouse_operator_spec = kubectl.get(
            "pod", name="", ns=settings.operator_namespace, label="-l app=clickhouse-operator"
        )
        chi = kubectl.get("chi", ns=settings.test_namespace, name=chi_name)
        return clickhouse_operator_spec, chi
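util.require_zookeeper() (called as a bare require_zookeeper() in some snippets) installs a single-node Zookeeper and waits for it before replicated tables are created. A sketch, where the manifest path and the zookeeper-0 pod name are assumptions:

import kubectl
import util


def require_zookeeper():
    # `kubectl apply` is idempotent, so it is safe to call this from every test.
    manifest_path = util.get_full_path(
        "../deploy/zookeeper/quick-start-persistent-volume/zookeeper-1-node.yaml")  # assumed path
    kubectl.launch(f"apply -f {manifest_path}")
    kubectl.wait_field("pod", "zookeeper-0", ".status.containerStatuses[0].ready", "true")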
Example #5
def test_018():
    kubectl.create_and_check(config="configs/test-018-configmap.yaml",
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })
    chi_name = "test-018-configmap"

    with Then("user1/networks/ip should be in config"):
        chi = kubectl.get("chi", chi_name)
        assert "user1/networks/ip" in chi["spec"]["configuration"]["users"]

    start_time = kubectl.get_field("pod", f"chi-{chi_name}-default-0-0-0",
                                   ".status.startTime")

    kubectl.create_and_check(config="configs/test-018-configmap-2.yaml",
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })
    with Then("user2/networks should be in config"):
        chi = kubectl.get("chi", chi_name)
        assert "user2/networks/ip" in chi["spec"]["configuration"]["users"]
        with And("user1/networks/ip should NOT be in config"):
            assert "user1/networks/ip" not in chi["spec"]["configuration"][
                "users"]
        with And("Pod should not be restarted"):
            new_start_time = kubectl.get_field(
                "pod", f"chi-{chi_name}-default-0-0-0", ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi(chi_name)
Example #6
def test_metrics_exporter_with_multiple_clickhouse_version():
    def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, max_retries=10):
        with And(f"metrics-exporter /metrics enpoint result should match with {expect_result}"):
            for i in range(1, max_retries):
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics",
                    ns=operator_namespace
                )
                all_strings_expected_done = True
                for string, exists in expect_result.items():
                    all_strings_expected_done = (exists == (string in out))
                    if not all_strings_expected_done:
                        break

                if all_strings_expected_done:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert all_strings_expected_done, error()

    with Given("clickhouse-operator pod exists"):
        out = kubectl.launch("get pods -l app=clickhouse-operator", ns='kube-system').splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = "kube-system"

        with Then("check empty /metrics"):
            kubectl.delete_ns(kubectl.namespace, ok_to_fail=True)
            kubectl.create_ns(kubectl.namespace)
            check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                'chi_clickhouse_metric_VersionInteger': False,
            })

        with Then("Install multiple clickhouse version"):
            config = util.get_full_path("configs/test-017-multi-version.yaml")
            kubectl.create_and_check(
                config=config,
                check={
                    "object_counts": {
                        "statefulset": 4,
                        "pod": 4,
                        "service": 5,
                    },
                    "do_not_delete": True,
                })
            with And("Check not empty /metrics"):
                check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                    '# HELP chi_clickhouse_metric_VersionInteger': True,
                    '# TYPE chi_clickhouse_metric_VersionInteger gauge': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-2-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-3-0': True,

                })

        with Then("check empty /metrics after delete namespace"):
            kubectl.delete_ns(kubectl.namespace)
            check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                'chi_clickhouse_metric_VersionInteger': False,
            })
Example #7
def test_020(config="configs/test-020-multi-volume.yaml"):
    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/lib/clickhouse2",
            },
            "do_not_delete": 1,
        })

    with When("Create a table and insert 1 row"):
        clickhouse.query(chi, "create table test_disks(a Int8) Engine = MergeTree() order by a")
        clickhouse.query(chi, "insert into test_disks values (1)")

        with Then("Data should be placed on default disk"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'default'

    with When("alter table test_disks move partition tuple() to disk 'disk2'"):
        clickhouse.query(chi, "alter table test_disks move partition tuple() to disk 'disk2'")

        with Then("Data should be placed on disk2"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'disk2'

    kubectl.delete_chi(chi)
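test_020 relies on the manifest defining a second disk named disk2 in a storage policy. A small, hypothetical sanity check that could be added before moving the partition (system.disks is a standard ClickHouse system table):

from testflows.core import Then

import clickhouse


def check_disks_visible(chi):
    # Both disks from the multi-volume manifest should be registered in
    # ClickHouse before parts can be moved between them.
    with Then("Both disks should be visible in system.disks"):
        out = clickhouse.query(chi, "select name from system.disks order by name")
        assert "default" in out and "disk2" in out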
Example #8
def test_examples02_1():
    create_and_check(
        "../docs/chi-examples/02-persistent-volume-01-default-volume.yaml", {
            "pod_count": 1,
            "pod_volumes":
            {"/var/lib/clickhouse", "/var/log/clickhouse-server"}
        })
Example #9
def test_015():
    kubectl.create_and_check(config="configs/test-015-host-network.yaml",
                             check={
                                 "pod_count": 2,
                                 "do_not_delete": 1,
                             })

    time.sleep(30)

    with Then("Query from one server to another one should work"):
        out = clickhouse.query(
            "test-015-host-network",
            host="chi-test-015-host-network-default-0-0",
            port="10000",
            sql="SELECT * FROM remote('chi-test-015-host-network-default-0-1', system.one)"
        )
        print("remote out=")
        print(out)

    with Then("Distributed query should work"):
        out = clickhouse.query(
            "test-015-host-network",
            host="chi-test-015-host-network-default-0-0",
            port="10000",
            sql="SELECT count() FROM cluster('all-sharded', system.one) settings receive_timeout=10"
        )
        print("cluster out=")
        print(out)
        assert out == "2"

    kubectl.delete_chi("test-015-host-network")
Example #10
def test_ch_002(self):
    kubectl.create_and_check(
        "configs/test-ch-002-row-level.yaml", {
            "apply_templates": {"templates/tpl-clickhouse-20.3.yaml"},
            "do_not_delete": 1,
        })

    chi = "test-ch-002-row-level"
    create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""

    with When("Create test table"):
        clickhouse.query(chi, create_table)

    with And("Insert some data"):
        clickhouse.query(
            chi,
            "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
        )

    with Then(
            "Make another query for different users. It should be restricted to corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select user from test", user=user)
            assert out == user

    with Then(
            "Make a count() query for different users. It should be restricted to corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select count() from test", user=user)
            assert out == "1"

    kubectl.delete_chi(chi)
Example #11
def test_metrics_exporter_reboot():
    def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10):
        with And(f"metrics-exporter /chi enpoint result should return {expect_result}"):
            for i in range(1, max_retries):
                # check /metrics for try to refresh monitored instances
                kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics",
                    ns=operator_namespace
                )
                # check /chi after refresh monitored instances
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/chi",
                    ns=operator_namespace
                )
                out = json.loads(out)
                if out == expect_result:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert out == expect_result, error()

    with Given("clickhouse-operator is installed"):
        kubectl.wait_field("pods", "-l app=clickhouse-operator", ".status.containerStatuses[*].ready", "true,true",
                           ns=settings.operator_namespace)
        assert kubectl.get_count("pod", ns='--all-namespaces', label="-l app=clickhouse-operator") > 0, error()

        out = kubectl.launch("get pods -l app=clickhouse-operator", ns=settings.operator_namespace).splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = settings.operator_namespace
        kubectl.delete_ns(kubectl.namespace)
        kubectl.create_ns(kubectl.namespace)
        check_monitoring_chi(operator_namespace, operator_pod, [])
        with And("created simple clickhouse installation"):
            config = util.get_full_path("../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml")
            kubectl.create_and_check(
                config=config,
                check={
                    "object_counts": {
                        "statefulset": 1,
                        "pod": 1,
                        "service": 2,
                    },
                    "do_not_delete": True,
                })
            expected_chi = [{
                "namespace": "test", "name": "simple-01",
                "hostnames": ["chi-simple-01-cluster-0-0.test.svc.cluster.local"]
            }]
            check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
            with When("reboot metrics exporter"):
                kubectl.launch(f"exec -n {operator_namespace} {operator_pod} -c metrics-exporter -- reboot")
                time.sleep(15)
                kubectl.wait_field("pods", "-l app=clickhouse-operator",
                                        ".status.containerStatuses[*].ready", "true,true",
                                   ns=settings.operator_namespace)
                with Then("check metrics exporter still contains chi objects"):
                    check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
                    kubectl.delete(config)
                    check_monitoring_chi(operator_namespace, operator_pod, [])
Example #12
def test_007():
    kubectl.create_and_check(
        config="configs/test-007-custom-ports.yaml",
        check={
            "pod_count": 1,
            "pod_ports": [8124, 9001, 9010],
        }
    )
Example #13
def test_004():
    kubectl.create_and_check(config="configs/test-004-tpl.yaml",
                             check={
                                 "pod_count": 1,
                                 "pod_volumes": {
                                     "/var/lib/clickhouse",
                                 },
                             })
Example #14
def test_examples02_2():
    create_and_check(
        "../docs/chi-examples/02-persistent-volume-02-pod-template.yaml", {
            "pod_count": 1,
            "pod_image": "yandex/clickhouse-server:19.3.7",
            "pod_volumes":
            {"/var/lib/clickhouse", "/var/log/clickhouse-server"}
        })
Example #15
def test_examples01_1():
    kubectl.create_and_check(
        config="../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml",
        check={"object_counts": {
            "statefulset": 1,
            "pod": 1,
            "service": 2,
        }})
Example #16
def test_system_settings_changed(self):
    changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(
        chi)

    with When("apply changed settings"):
        kubectl.create_and_check(
            config="configs/test-cluster-for-alerts-changed-settings.yaml",
            check={
                "apply_templates": [
                    "templates/tpl-clickhouse-stable.yaml",
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete":
                1
            })

    with Then("check ClickHouseSystemSettingsChanged firing"):
        fired = alerts.wait_alert_state("ClickHouseSystemSettingsChanged",
                                        "firing",
                                        True,
                                        labels={"hostname": changed_svc},
                                        time_range="30s")
        assert fired, error(
            "can't get ClickHouseTooManyConnections alert in firing state")

    with When("rollback changed settings"):
        kubectl.create_and_check(
            config="configs/test-cluster-for-alerts.yaml",
            check={
                "apply_templates": [
                    "templates/tpl-clickhouse-latest.yaml",
                    "templates/tpl-clickhouse-alerts.yaml",
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete":
                1
            })

    with Then("check ClickHouseSystemSettingsChanged gone away"):
        resolved = alerts.wait_alert_state("ClickHouseSystemSettingsChanged",
                                           "firing",
                                           False,
                                           labels={"hostname": changed_svc},
                                           sleep_time=30)
        assert resolved, error(
            "can't check ClickHouseTooManyConnections alert is gone away")
Example #17
def test_001():
    kubectl.create_and_check(config="configs/test-001.yaml",
                             check={
                                 "object_counts": {
                                     "statefulset": 1,
                                     "pod": 1,
                                     "service": 2,
                                 },
                                 "configmaps": 1,
                             })
Example #18
def test_examples02_2():
    kubectl.create_and_check(
        config="../docs/chi-examples/03-persistent-volume-02-pod-template.yaml",
        check={
            "pod_count": 1,
            "pod_image": "yandex/clickhouse-server:19.3.7",
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/log/clickhouse-server",
            },
        })
Example #19
def test_003():
    kubectl.create_and_check(
        config="configs/test-003-complex-layout.yaml",
        check={
            "object_counts": {
                "statefulset": 4,
                "pod": 4,
                "service": 5,
            },
        },
    )
Example #20
def test_005():
    kubectl.create_and_check(
        config="configs/test-005-acm.yaml",
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
            },
        },
        timeout=1200,
    )
Example #21
def test_examples02_1():
    kubectl.create_and_check(
        config="../docs/chi-examples/03-persistent-volume-01-default-volume.yaml",
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/log/clickhouse-server",
            },
        })
Example #22
def test_022(config="configs/test-022-broken-image.yaml"):
    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(config=config,
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                                 "chi_status": "InProgress",
                             })
    with When("ClickHouse image can not be retrieved"):
        kubectl.wait_field(
            "pod", "chi-test-022-broken-image-default-0-0-0",
            ".status.containerStatuses[0].state.waiting.reason",
            "ErrImagePull")
        kubectl.delete_chi(chi)
Example #23
def test_019(config="configs/test-019-retain-volume.yaml"):
    require_zookeeper()

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "do_not_delete": 1,
        })

    create_non_replicated_table = "create table t1 Engine = Log as select 1 as a"
    create_replicated_table = """
    create table t2 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    partition by tuple() order by a
    as select 1 as a""".replace('\r', '').replace('\n', '')

    with Given("ClickHouse has some data in place"):
        clickhouse.query(chi, sql=create_non_replicated_table)
        clickhouse.query(chi, sql=create_replicated_table)

    with When("CHI with retained volume is deleted"):
        pvc_count = kubectl.get_count("pvc")
        pv_count = kubectl.get_count("pv")

        kubectl.delete_chi(chi)

        with Then("PVC should be retained"):
            assert kubectl.get_count("pvc") == pvc_count
            assert kubectl.get_count("pv") == pv_count

    with When("Re-create CHI"):
        kubectl.create_and_check(
            config=config,
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

    with Then("PVC should be re-mounted"):
        with And("Non-replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t1")
            assert out == "1"
        with And("Replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t2")
            assert out == "1"

    kubectl.delete_chi(chi)
Example #24
def test_002():
    kubectl.create_and_check(config="configs/test-002-tpl.yaml",
                             check={
                                 "pod_count": 1,
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                     "templates/tpl-log-volume.yaml",
                                     "templates/tpl-one-per-host.yaml",
                                 },
                                 "pod_image": settings.clickhouse_version,
                                 "pod_volumes": {
                                     "/var/log/clickhouse-server",
                                 },
                                 "pod_podAntiAffinity": 1
                             })
Example #25
def test_version_changed():
    changed_pod, changed_svc, _, _ = random_pod_choice_for_callbacks()

    with When("apply changed settings"):
        kubectl.create_and_check(
            config="configs/test-cluster-for-alerts-changed-settings.yaml",
            check={
                "apply_templates": [
                    "templates/tpl-clickhouse-20.7.yaml",
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete": 1
            }
        )
        prometheus_scrape_interval = 15
        with And(f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"):
            time.sleep(prometheus_scrape_interval * 2)

    with Then("check ClickHouseVersionChanged firing"):
        fired = wait_alert_state("ClickHouseVersionChanged", "firing", True, labels={"hostname": changed_svc},
                                 time_range="30s", sleep_time=5)
        assert fired, error("can't get ClickHouseVersionChanged alert in firing state")

    with When("rollback changed settings"):
        kubectl.create_and_check(
            config="configs/test-cluster-for-alerts.yaml",
            check={
                "apply_templates": [
                    "templates/tpl-clickhouse-latest.yaml",
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete": 1
            }
        )

    with Then("check ClickHouseVersionChanged gone away"):
        resolved = wait_alert_state("ClickHouseVersionChanged", "firing", False, labels={"hostname": changed_svc}, sleep_time=30)
        assert resolved, error("can't check ClickHouseVersionChanged alert is gone away")
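random_pod_choice_for_callbacks() (referenced as alerts.random_pod_choice_for_callbacks(chi) in Example #16) selects one replica of the alert test cluster so that the changed host can be compared against the untouched one. A sketch, assuming the usual chi-<name>-default-<shard>-<replica> naming; the default installation name is illustrative:

import random


def random_pod_choice_for_callbacks(chi_name="test-cluster-for-alerts"):
    # Pick one of the two shards at random; alerts are expected to fire for the
    # chosen host while the other one stays available for comparison.
    shard = random.randint(0, 1)
    other = 1 - shard
    chosen_pod = f"chi-{chi_name}-default-{shard}-0-0"
    chosen_svc = f"chi-{chi_name}-default-{shard}-0"
    other_pod = f"chi-{chi_name}-default-{other}-0-0"
    other_svc = f"chi-{chi_name}-default-{other}-0"
    return chosen_pod, chosen_svc, other_pod, other_svc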
Example #26
def apply_fake_backup(message):
    with Given(message):
        kubectl.create_and_check(
            config="configs/test-cluster-for-backups-fake.yaml",
            check={
                "apply_templates": [
                    "templates/tpl-clickhouse-backups-fake.yaml",
                    "templates/tpl-persistent-volume-100Mi.yaml"
                ],
                "object_counts": {
                    "statefulset": 2,
                    "pod": 2,
                    "service": 3,
                },
                "do_not_delete":
                1
            })
Example #27
def test_010():
    set_operator_version(settings.operator_version)
    require_zookeeper()

    kubectl.create_and_check(
        config="configs/test-010-zkroot.yaml",
        check={
            "apply_templates": {
                settings.clickhouse_template,
            },
            "pod_count": 1,
            "do_not_delete": 1,
        }
    )
    with And("ClickHouse should complain regarding zookeeper path"):
        out = clickhouse.query_with_error("test-010-zkroot", "select * from system.zookeeper where path = '/'")
        assert "You should create root node /clickhouse/test-010-zkroot before start" in out, error()

    kubectl.delete_chi("test-010-zkroot")
Example #28
def test_012():
    kubectl.create_and_check(
        config="configs/test-012-service-template.yaml",
        check={
            "object_counts": {
                "statefulset": 2,
                "pod": 2,
                "service": 4,
            },
            "do_not_delete": 1,
        }
    )
    with Then("There should be a service for chi"):
        kubectl.check_service("service-test-012", "LoadBalancer")
    with And("There should be a service for shard 0"):
        kubectl.check_service("service-test-012-0-0", "ClusterIP")
    with And("There should be a service for shard 1"):
        kubectl.check_service("service-test-012-1-0", "ClusterIP")
    with And("There should be a service for default cluster"):
        kubectl.check_service("service-default", "ClusterIP")

    node_port = kubectl.get("service", "service-test-012")["spec"]["ports"][0]["nodePort"]

    with Then("Update chi"):
        kubectl.create_and_check(
            config="configs/test-012-service-template-2.yaml",
            check={
                "object_counts": {
                    "statefulset": 1,
                    "pod": 1,
                    "service": 3,
                },
                "do_not_delete": 1,
            }
        )

        with And("NodePort should not change"):
            new_node_port = kubectl.get("service", "service-test-012")["spec"]["ports"][0]["nodePort"]
            assert new_node_port == node_port, \
                f"LoadBalancer.spec.ports[0].nodePort changed from {node_port} to {new_node_port}"

    kubectl.delete_chi("test-012")
Example #29
def test_006():
    with Then("Create initial position"):
        kubectl.create_and_check(config="configs/test-006-ch-upgrade-1.yaml",
                                 check={
                                     "pod_count": 2,
                                     "pod_image":
                                     "yandex/clickhouse-server:19.11",
                                     "do_not_delete": 1,
                                 })
    with Then(
            "Use different podTemplate and confirm that pod image is updated"):
        kubectl.create_and_check(config="configs/test-006-ch-upgrade-2.yaml",
                                 check={
                                     "pod_count": 2,
                                     "pod_image":
                                     "yandex/clickhouse-server:19.16",
                                     "do_not_delete": 1,
                                 })
    with Then(
            "Change image in podTemplate itself and confirm that pod image is updated"
    ):
        kubectl.create_and_check(config="configs/test-006-ch-upgrade-3.yaml",
                                 check={
                                     "pod_count": 2,
                                     "pod_image":
                                     "yandex/clickhouse-server:19.11",
                                 })
Example #30
def test_011_1():
    with Given(
            "test-011-secured-default.yaml with password_sha256_hex for default user"
    ):
        kubectl.create_and_check(
            config="configs/test-011-secured-default.yaml",
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

        with Then("Default user password should be '_removed_'"):
            chi = kubectl.get("chi", "test-011-secured-default")
            assert "default/password" in chi["status"]["normalized"][
                "configuration"]["users"]
            assert chi["status"]["normalized"]["configuration"]["users"][
                "default/password"] == "_removed_"

        with And("Connection to localhost should succeed with default user"):
            out = clickhouse.query_with_error(
                "test-011-secured-default",
                "select 'OK'",
                pwd="clickhouse_operator_password")
            assert out == 'OK'

        with When("Trigger installation update"):
            kubectl.create_and_check(
                config="configs/test-011-secured-default-2.yaml",
                check={
                    "do_not_delete": 1,
                })
            with Then("Default user password should be '_removed_'"):
                chi = kubectl.get("chi", "test-011-secured-default")
                assert "default/password" in chi["status"]["normalized"][
                    "configuration"]["users"]
                assert chi["status"]["normalized"]["configuration"]["users"][
                    "default/password"] == "_removed_"

        with When("Default user is assigned the different profile"):
            kubectl.create_and_check(
                config="configs/test-011-secured-default-3.yaml",
                check={
                    "do_not_delete": 1,
                })
            with Then("Wait until configmap is reloaded"):
                # Need to wait to make sure configuration is reloaded. For some reason it takes long here
                # Maybe we can restart the pod to speed it up
                time.sleep(120)
            with Then(
                    "Connection to localhost should succeed with default user"
            ):
                out = clickhouse.query_with_error("test-011-secured-default",
                                                  "select 'OK'")
                assert out == 'OK'

        kubectl.delete_chi("test-011-secured-default")