Example #1
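# These snippets share a common preamble from the clickhouse-operator test suite. A minimal
# sketch of what they appear to assume (clickhouse, kubectl, manifest, settings and util are
# suite-local helper modules, not a public API; Given/When/Then/And come from testflows.core):
#
#     import time
#     from testflows.core import Given, When, Then, And
#     import clickhouse, kubectl, manifest, settings, util
#
# set_operator_version(), restart_operator() and require_zookeeper() are suite-local helpers.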
def test_operator_restart(config, version=settings.operator_version):
    with Given(f"clickhouse-operator {version}"):
        set_operator_version(version)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)
        cluster = chi

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
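        # Record the pod's startTime so the steps below can check that the pod was not recreated.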
        start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                       ".status.startTime")

        with When("Restart operator"):
            restart_operator()
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed")
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2,
            })
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
Example #2
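# test_018: user settings defined in the CHI manifest are updated in place; replacing
# user1/networks/ip with user2/networks/ip must not restart the ClickHouse pod.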
def test_018():
    kubectl.create_and_check(config="configs/test-018-configmap.yaml",
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })
    chi_name = "test-018-configmap"

    with Then("user1/networks/ip should be in config"):
        chi = kubectl.get("chi", chi_name)
        assert "user1/networks/ip" in chi["spec"]["configuration"]["users"]

    start_time = kubectl.get_field("pod", f"chi-{chi_name}-default-0-0-0",
                                   ".status.startTime")

    kubectl.create_and_check(config="configs/test-018-configmap-2.yaml",
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })
    with Then("user2/networks should be in config"):
        chi = kubectl.get("chi", chi_name)
        assert "user2/networks/ip" in chi["spec"]["configuration"]["users"]
        with And("user1/networks/ip should NOT be in config"):
            assert "user1/networks/ip" not in chi["spec"]["configuration"][
                "users"]
        with And("Pod should not be restarted"):
            new_start_time = kubectl.get_field(
                "pod", f"chi-{chi_name}-default-0-0-0", ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi(chi_name)
Example #3
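# Upgrading the operator from version_from to the current version must reconcile the existing
# installation in place: object counts stay the same and the pod keeps its original startTime.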
def test_operator_upgrade(config,
                          version_from,
                          version_to=settings.operator_version):
    with Given(f"clickhouse-operator {version_from}"):
        set_operator_version(version_from)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                       ".status.startTime")

        with When(f"upgrade operator to {version_to}"):
            set_operator_version(version_to, timeout=120)
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed", retries=6)
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2
            })
            new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
Example #4
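# test_016: custom settings from the CHI manifest (macros, a dictionary, user definitions, a
# remote_servers override) are applied, and a later users.d update must be picked up without
# restarting ClickHouse.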
def test_016():
    chi = "test-016-settings"
    kubectl.create_and_check(config="configs/test-016-settings-01.yaml",
                             check={
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                 },
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })

    with Then("Custom macro 'layer' should be available"):
        out = clickhouse.query(
            chi,
            sql="select substitution from system.macros where macro='layer'")
        assert out == "01"

    with And("Custom macro 'test' should be available"):
        out = clickhouse.query(
            chi,
            sql="select substitution from system.macros where macro='test'")
        assert out == "test"

    with And("dictGet() should work"):
        out = clickhouse.query(chi,
                               sql="select dictGet('one', 'one', toUInt64(0))")
        assert out == "0"

    with And("query_log should be disabled"):
        clickhouse.query(chi, sql="system flush logs")
        out = clickhouse.query_with_error(
            chi, sql="select count() from system.query_log")
        assert "doesn't exist" in out

    with And("max_memory_usage should be 7000000000"):
        out = clickhouse.query(
            chi,
            sql=
            "select value from system.settings where name='max_memory_usage'")
        assert out == "7000000000"

    with And("test_usersd user should be available"):
        clickhouse.query(chi, sql="select version()", user="test_usersd")

    with And("user1 user should be available"):
        clickhouse.query(chi,
                         sql="select version()",
                         user="******",
                         pwd="qwerty")

    with And("system.clusters should be empty due to remote_servers override"):
        out = clickhouse.query(chi, sql="select count() from system.clusters")
        assert out == "0"

    with When("Update usersd settings"):
        start_time = kubectl.get_field("pod", f"chi-{chi}-default-0-0-0",
                                       ".status.startTime")
        kubectl.create_and_check(config="configs/test-016-settings-02.yaml",
                                 check={
                                     "do_not_delete": 1,
                                 })
        with Then("Wait for configmap changes to apply"):
            config_map_applied_num = "0"
            i = 1
            while config_map_applied_num == "0" and i < 10:
                config_map_applied_num = kubectl.launch(
                    f"exec chi-{chi}-default-0-0-0 -- bash -c \"grep test_norestart /etc/clickhouse-server/users.d/my_users.xml | wc -l\""
                )
                if config_map_applied_num != "0":
                    break
                with And(f"not applied, wait {15 * i}s"):
                    time.sleep(15 * i)
                    i += 1

            assert config_map_applied_num != "0", "ConfigMap should be applied"

        version = ""
        with Then("test_norestart user should be available"):
            version = clickhouse.query(chi,
                                       sql="select version()",
                                       user="******")
        with And("user1 user should not be available"):
            version_user1 = clickhouse.query_with_error(chi,
                                                        sql="select version()",
                                                        user="******",
                                                        pwd="qwerty")
            assert version != version_user1
        with And("user2 user should be available"):
            version_user2 = clickhouse.query(chi,
                                             sql="select version()",
                                             user="******",
                                             pwd="qwerty")
            assert version == version_user2
        with And("ClickHouse should not be restarted"):
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-default-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi("test-016-settings")
Example #5
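# test_014: ReplicatedMergeTree replication. Schema objects created on one replica must be
# migrated to a newly added replica, removing a replica must be reflected in ZooKeeper, and the
# table must recover read-write mode after the ZooKeeper pod is restarted.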
def test_014():
    require_zookeeper()

    create_table = """
    CREATE TABLE test_local(a Int8) 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    PARTITION BY tuple() 
    ORDER BY a
    """.replace('\r', '').replace('\n', '')

    config = "configs/test-014-replication-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(
        config=config,
        check={
            "apply_templates": {
                settings.clickhouse_template,
                "templates/tpl-persistent-volume-100Mi.yaml",
            },
            "object_counts": {
                "statefulset": 2,
                "pod": 2,
                "service": 3,
            },
            "do_not_delete": 1,
        })

    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = ['test_local', 'test_view', 'test_mv', 'a_view']
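    # The dictionary (test_dict) is not in this list; it is verified separately below via
    # system.dictionaries rather than system.tables.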
    with Given("Create schema objects"):
        clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW test_view as SELECT * from test_local",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW a_view as SELECT * from test_view",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE MATERIALIZED VIEW test_mv Engine = Log as SELECT * from test_local",
            host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE DICTIONARY test_dict (a Int8, b Int8) PRIMARY KEY a SOURCE(CLICKHOUSE(host 'localhost' port 9000 table 'test_local' user 'default')) LAYOUT(FLAT()) LIFETIME(0)",
            host=f"chi-{chi}-{cluster}-0-0")

    with Given(
            "Replicated table is created on the first replica and data is inserted"
    ):
        clickhouse.query(chi,
                         "INSERT INTO test_local values(1)",
                         host=f"chi-{chi}-{cluster}-0-0")
        with When("Table is created on the second replica"):
            clickhouse.query(chi,
                             create_table,
                             host=f"chi-{chi}-{cluster}-0-1")
            # Give some time for replication to catch up
            time.sleep(10)
            with Then("Data should be replicated"):
                out = clickhouse.query(chi,
                                       "SELECT a FROM test_local",
                                       host=f"chi-{chi}-{cluster}-0-1")
                assert out == "1"

    with When("Add one more replica"):
        kubectl.create_and_check(config="configs/test-014-replication-2.yaml",
                                 check={
                                     "pod_count": 3,
                                     "do_not_delete": 1,
                                 })
        # Give some time for replication to catch up
        time.sleep(10)

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Schema objects should be migrated to the new replica"):
            for obj in schema_objects:
                out = clickhouse.query(
                    chi,
                    f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                    host=f"chi-{chi}-{cluster}-0-2")
                assert out == "1"
            # Check dictionary
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.dictionaries WHERE name = 'test_dict'",
                host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

        with And("Replicated table should have the data"):
            out = clickhouse.query(chi,
                                   "SELECT a FROM test_local",
                                   host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

    with When("Remove replica"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "pod_count": 1,
                                     "do_not_delete": 1,
                                 })

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Replica needs to be removed from the Zookeeper as well"):
            out = clickhouse.query(
                chi,
                "SELECT count() FROM system.replicas WHERE table='test_local'")
            assert out == "1"

    with When("Restart Zookeeper pod"):
        with Then("Delete Zookeeper pod"):
            kubectl.launch("delete pod zookeeper-0")
            time.sleep(1)

        with Then(
                "Insert into the table while there is no Zookeeper -- table should be in readonly mode"
        ):
            out = clickhouse.query_with_error(
                chi, "INSERT INTO test_local values(2)")
            assert "Table is in readonly mode" in out

        with Then("Wait for Zookeeper pod to come back"):
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")

        with Then(
                "Wait for ClickHouse to reconnect to Zookeeper and switch to read-write mode"
        ):
            time.sleep(30)
        # with Then("Restart clickhouse pods"):
        #    kubectl("delete pod chi-test-014-replication-default-0-0-0")
        #    kubectl("delete pod chi-test-014-replication-default-0-1-0")

        with Then("Table should be back to normal"):
            clickhouse.query(chi, "INSERT INTO test_local values(3)")

    kubectl.delete_chi("test-014-replication")
Example #6
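# test_013: adding shards. Tables created on the first shard must be propagated to the new
# shards, and the first pod must not be restarted when shards are added or removed.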
def test_013():
    config = "configs/test-013-add-shards-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(config=config,
                             check={
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                 },
                                 "object_counts": {
                                     "statefulset": 1,
                                     "pod": 1,
                                     "service": 2,
                                 },
                                 "do_not_delete": 1,
                             })
    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = [
        'test_local',
        'test_distr',
        'events-distr',
    ]
    with Then("Create local and distributed tables"):
        clickhouse.query(
            chi,
            "CREATE TABLE test_local Engine = Log as SELECT * FROM system.one")
        clickhouse.query(
            chi,
            "CREATE TABLE test_distr as test_local Engine = Distributed('default', default, test_local)"
        )
        clickhouse.query(chi, "CREATE DATABASE \\\"test-db\\\"")
        clickhouse.query(
            chi,
            "CREATE TABLE \\\"test-db\\\".\\\"events-distr\\\" as system.events "
            "ENGINE = Distributed('all-sharded', system, events)")

    with Then("Add shards"):
        kubectl.create_and_check(config="configs/test-013-add-shards-2.yaml",
                                 check={
                                     "object_counts": {
                                         "statefulset": 3,
                                         "pod": 3,
                                         "service": 4,
                                     },
                                     "do_not_delete": 1,
                                 })

    # Give some time for replication to catch up
    time.sleep(10)

    with Then("Unaffected pod should not be restarted"):
        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

    with And("Schema objects should be migrated to new shards"):
        for obj in schema_objects:
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-1-0")
            assert out == "1"
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-2-0")
            assert out == "1"

    with When("Remove shards"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        time.sleep(10)
        with Then("Unaffected pod should not be restarted"):
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi(chi)
Example #7
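# test_021: PVC resizing. Requires a default StorageClass with allowVolumeExpansion; the data
# volume is grown from 100Mi to 200Mi, then a second 50Mi disk is added and ClickHouse should
# report two disks in system.disks.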
def test_021(config="configs/test-021-rescale-volume-01.yaml"):
    with Given("Default storage class is expandable"):
        default_storage_class = kubectl.get_default_storage_class()
        assert default_storage_class is not None
        assert len(default_storage_class) > 0
        allow_volume_expansion = kubectl.get_field("storageclass",
                                                   default_storage_class,
                                                   ".allowVolumeExpansion")
        if allow_volume_expansion != "true":
            kubectl.launch(
                f"patch storageclass {default_storage_class} -p '{{\"allowVolumeExpansion\":true}}'"
            )

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(config=config,
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })

    with Then("Storage size should be 100Mi"):
        size = kubectl.get_pvc_size(
            "disk1-chi-test-021-rescale-volume-simple-0-0-0")
        assert size == "100Mi"

    with When("Re-scale volume configuration to 200Mb"):
        kubectl.create_and_check(
            config="configs/test-021-rescale-volume-02-enlarge-disk.yaml",
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

        with Then("Storage size should be 200Mi"):
            size = kubectl.get_pvc_size(
                "disk1-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "200Mi"

    with When("Add second disk 50Mi"):
        kubectl.create_and_check(
            config="configs/test-021-rescale-volume-03-add-disk.yaml",
            check={
                "pod_count": 1,
                "pod_volumes": {
                    "/var/lib/clickhouse",
                    "/var/lib/clickhouse2",
                },
                "do_not_delete": 1,
            })

        with Then("There should be two PVC"):
            size = kubectl.get_pvc_size(
                "disk1-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "200Mi"
            size = kubectl.get_pvc_size(
                "disk2-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "50Mi"

        with And("There should be two disks recognized by ClickHouse"):
            # ClickHouse needs some time to mount the newly added volume; wait to avoid racing the check below.

            time.sleep(120)
            out = clickhouse.query(chi, "SELECT count() FROM system.disks")
            print("SELECT count() FROM system.disks RETURNED:")
            print(out)
            assert out == "2"

    kubectl.delete_chi(chi)