Example #1
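# Assumed context: these snippets come from the operator's TestFlows-based
# end-to-end suite, where `kubectl`, `clickhouse`, `manifest`, `util` and
# `settings` are the suite's own helper modules and Given/When/Then/And are
# imported from testflows.core.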
def require_zookeeper(manifest='zookeeper-1-node-1GB-for-tests-only.yaml', force_install=False):
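    """Deploy a single-node Zookeeper from the quick-start manifest if no
    'zookeeper' service exists yet (or unconditionally when force_install is
    set), then wait for the zookeeper-0 pod to reach the Running state."""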
    with Given("Install Zookeeper if missing"):
        if force_install or kubectl.get_count("service", name="zookeeper") == 0:
            config = util.get_full_path(f"../deploy/zookeeper/quick-start-persistent-volume/{manifest}")
            kubectl.apply(config)
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")
Example #2
def restart_operator(ns=settings.operator_namespace, timeout=60):
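    """Delete the clickhouse-operator pod in namespace `ns` and wait until the
    replacement pod is scheduled and reaches the Running state."""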
    pod_name = kubectl.get(
        "pod", name="", ns=ns,
        label="-l app=clickhouse-operator")["items"][0]["metadata"]["name"]
    kubectl.launch(f"delete pod {pod_name}", ns=ns, timeout=timeout)
    kubectl.wait_object("pod",
                        name="",
                        ns=ns,
                        label="-l app=clickhouse-operator")
    pod_name = kubectl.get(
        "pod", name="", ns=ns,
        label="-l app=clickhouse-operator")["items"][0]["metadata"]["name"]
    kubectl.wait_pod_status(pod_name, "Running", ns=ns)
Example #3
def test_014():
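    """Check that schema objects and data are replicated to newly added
    replicas, that scaling up/down does not restart existing pods, and that
    replication recovers after the Zookeeper pod is restarted."""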
    require_zookeeper()

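    # The DDL is flattened to a single line; the {installation}/{cluster}/{shard}/
    # {database}/{table}/{replica} placeholders are ClickHouse macros, so the same
    # statement can be replayed verbatim on every replica.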
    create_table = """
    CREATE TABLE test_local(a Int8) 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    PARTITION BY tuple() 
    ORDER BY a
    """.replace('\r', '').replace('\n', '')

    config = "configs/test-014-replication-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

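    # Deploy the CHI with the ClickHouse and 100Mi persistent-volume templates,
    # verify the expected object counts for a two-replica cluster, and keep the
    # installation around for the rest of the test (do_not_delete).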
    kubectl.create_and_check(
        config=config,
        check={
            "apply_templates": {
                settings.clickhouse_template,
                "templates/tpl-persistent-volume-100Mi.yaml",
            },
            "object_counts": {
                "statefulset": 2,
                "pod": 2,
                "service": 3,
            },
            "do_not_delete": 1,
        })

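    # Remember the first replica's pod start time; later steps assert it is
    # unchanged, i.e. that scaling the cluster did not restart the pod.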
    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = ['test_local', 'test_view', 'test_mv', 'a_view']
    with Given("Create schema objects"):
        clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW test_view as SELECT * from test_local",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW a_view as SELECT * from test_view",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE MATERIALIZED VIEW test_mv Engine = Log as SELECT * from test_local",
            host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE DICTIONARY test_dict (a Int8, b Int8) PRIMARY KEY a SOURCE(CLICKHOUSE(host 'localhost' port 9000 table 'test_local' user 'default')) LAYOUT(FLAT()) LIFETIME(0)",
            host=f"chi-{chi}-{cluster}-0-0")

    with Given("Replicated table is created on the first replica and data is inserted"):
        clickhouse.query(chi,
                         "INSERT INTO test_local values(1)",
                         host=f"chi-{chi}-{cluster}-0-0")
        with When("Table is created on the second replica"):
            clickhouse.query(chi,
                             create_table,
                             host=f"chi-{chi}-{cluster}-0-1")
            # Give some time for replication to catch up
            time.sleep(10)
            with Then("Data should be replicated"):
                out = clickhouse.query(chi,
                                       "SELECT a FROM test_local",
                                       host=f"chi-{chi}-{cluster}-0-1")
                assert out == "1"

    with When("Add one more replica"):
        kubectl.create_and_check(config="configs/test-014-replication-2.yaml",
                                 check={
                                     "pod_count": 3,
                                     "do_not_delete": 1,
                                 })
        # Give some time for replication to catch up
        time.sleep(10)

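        # Scaling up must not have restarted the first replica's pod.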
        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Schema objects should be migrated to the new replica"):
            for obj in schema_objects:
                out = clickhouse.query(
                    chi,
                    f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                    host=f"chi-{chi}-{cluster}-0-2")
                assert out == "1"
            # Check dictionary
            out = clickhouse.query(
                chi,
                "SELECT count() FROM system.dictionaries WHERE name = 'test_dict'",
                host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

        with And("Replicated table should have the data"):
            out = clickhouse.query(chi,
                                   "SELECT a FROM test_local",
                                   host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

    with When("Remove replica"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "pod_count": 1,
                                     "do_not_delete": 1,
                                 })

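        # Scaling back down must not have restarted the first replica's pod either.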
        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Replica needs to be removed from the Zookeeper as well"):
            out = clickhouse.query(
                chi,
                "SELECT count() FROM system.replicas WHERE table='test_local'")
            assert out == "1"

    with When("Restart Zookeeper pod"):
        with Then("Delete Zookeeper pod"):
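            # Kill the Zookeeper pod; its controller recreates it, which is
            # verified further below.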
            kubectl.launch("delete pod zookeeper-0")
            time.sleep(1)

        with Then(
                "Insert into the table while there is no Zookeeper -- table should be in readonly mode"
        ):
            out = clickhouse.query_with_error(
                chi, "INSERT INTO test_local values(2)")
            assert "Table is in readonly mode" in out

        with Then("Wait for Zookeeper pod to come back"):
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")

        with Then(
                "Wait for ClickHouse to reconnect to Zookeeper and switch to read-write mode"
        ):
            time.sleep(30)
        # with Then("Restart clickhouse pods"):
        #    kubectl("delete pod chi-test-014-replication-default-0-0-0")
        #    kubectl("delete pod chi-test-014-replication-default-0-1-0")

        with Then("Table should be back to normal"):
            clickhouse.query(chi, "INSERT INTO test_local values(3)")

    kubectl.delete_chi("test-014-replication")