Example #1
def create_and_check(config, check, ns=namespace, timeout=600):
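    # Resolve the manifest path, apply it, and then run only the checks present in
    # the `check` dict. Recognized keys: apply_templates, object_counts, pod_count,
    # chi_status (defaults to "Completed"), pod_image, pod_volumes,
    # pod_podAntiAffinity, pod_ports, service, configmaps, do_not_delete
    # (when absent, the CHI is deleted at the end).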
    config = util.get_full_path(config)
    chi_name = manifest.get_chi_name(config)

    if "apply_templates" in check:
        print("Need to apply additional templates")
        for t in check["apply_templates"]:
            print("Applying template:" + t)
            apply(util.get_full_path(t), ns=ns)
        time.sleep(5)

    apply(config, ns=ns, timeout=timeout)

    if "object_counts" in check:
        wait_objects(chi_name, check["object_counts"], ns=ns)

    if "pod_count" in check:
        wait_object("pod",
                    "",
                    label=f"-l clickhouse.altinity.com/chi={chi_name}",
                    count=check["pod_count"],
                    ns=ns)

    if "chi_status" in check:
        wait_chi_status(chi_name, check["chi_status"], ns=ns)
    else:
        wait_chi_status(chi_name, "Completed", ns=ns)

    if "pod_image" in check:
        check_pod_image(chi_name, check["pod_image"], ns=ns)

    if "pod_volumes" in check:
        check_pod_volumes(chi_name, check["pod_volumes"], ns=ns)

    if "pod_podAntiAffinity" in check:
        check_pod_antiaffinity(chi_name, ns=ns)

    if "pod_ports" in check:
        check_pod_ports(chi_name, check["pod_ports"], ns=ns)

    if "service" in check:
        check_service(check["service"][0], check["service"][1], ns=ns)

    if "configmaps" in check:
        check_configmaps(chi_name, ns=ns)

    if "do_not_delete" not in check:
        delete_chi(chi_name, ns=ns)
Example #2
def test_020(config="configs/test-020-multi-volume.yaml"):
    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/lib/clickhouse2",
            },
            "do_not_delete": 1,
        })

    with When("Create a table and insert 1 row"):
        clickhouse.query(chi, "create table test_disks(a Int8) Engine = MergeTree() order by a")
        clickhouse.query(chi, "insert into test_disks values (1)")

        with Then("Data should be placed on default disk"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'default'

    with When("alter table test_disks move partition tuple() to disk 'disk2'"):
        clickhouse.query(chi, "alter table test_disks move partition tuple() to disk 'disk2'")

        with Then("Data should be placed on disk2"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'disk2'

    kubectl.delete_chi(chi)
Example #3
def require_zookeeper(manifest='zookeeper-1-node-1GB-for-tests-only.yaml', force_install=False):
    with Given("Install Zookeeper if missing"):
        if force_install or kubectl.get_count("service", name="zookeeper") == 0:
            config = util.get_full_path(f"../deploy/zookeeper/quick-start-persistent-volume/{manifest}")
            kubectl.apply(config)
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")
Example #4
def test_operator_restart(config, version=settings.operator_version):
    with Given(f"clickhouse-operator {version}"):
        set_operator_version(version)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)
        cluster = chi

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                       ".status.startTime")

        with When("Restart operator"):
            restart_operator()
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed")
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2,
            })
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
Example #5
def test_metrics_exporter_with_multiple_clickhouse_version():
    def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, max_retries=10):
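        # expect_result maps a substring of the /metrics output to a boolean:
        # True if the substring must be present, False if it must be absent.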
        with And(f"metrics-exporter /metrics enpoint result should match with {expect_result}"):
            for i in range(1, max_retries):
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics",
                    ns=operator_namespace
                )
                all_strings_expected_done = True
                for string, exists in expect_result.items():
                    all_strings_expected_done = (exists == (string in out))
                    if not all_strings_expected_done:
                        break

                if all_strings_expected_done:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert all_strings_expected_done, error()

    with Given("clickhouse-operator pod exists"):
        out = kubectl.launch("get pods -l app=clickhouse-operator", ns='kube-system').splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = "kube-system"

        with Then("check empty /metrics"):
            kubectl.delete_ns(kubectl.namespace, ok_to_fail=True)
            kubectl.create_ns(kubectl.namespace)
            check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                'chi_clickhouse_metric_VersionInteger': False,
            })

        with Then("Install multiple clickhouse version"):
            config = util.get_full_path("configs/test-017-multi-version.yaml")
            kubectl.create_and_check(
                config=config,
                check={
                    "object_counts": {
                        "statefulset": 4,
                        "pod": 4,
                        "service": 5,
                    },
                    "do_not_delete": True,
                })
            with And("Check not empty /metrics"):
                check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                    '# HELP chi_clickhouse_metric_VersionInteger': True,
                    '# TYPE chi_clickhouse_metric_VersionInteger gauge': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-0-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-1-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-2-0': True,
                    'chi_clickhouse_metric_VersionInteger{chi="test-017-multi-version",hostname="chi-test-017-multi-version-default-3-0': True,

                })

        with Then("check empty /metrics after delete namespace"):
            kubectl.delete_ns(kubectl.namespace)
            check_monitoring_metrics(operator_namespace, operator_pod, expect_result={
                'chi_clickhouse_metric_VersionInteger': False,
            })
Example #6
def test_operator_upgrade(config,
                          version_from,
                          version_to=settings.operator_version):
    with Given(f"clickhouse-operator {version_from}"):
        set_operator_version(version_from)
        config = util.get_full_path(config)
        chi = manifest.get_chi_name(config)

        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                       ".status.startTime")

        with When(f"upgrade operator to {version_to}"):
            set_operator_version(version_to, timeout=120)
            time.sleep(5)
            kubectl.wait_chi_status(chi, "Completed", retries=6)
            kubectl.wait_objects(chi, {
                "statefulset": 1,
                "pod": 1,
                "service": 2
            })
            new_start_time = kubectl.get_field("pod", f"chi-{chi}-{chi}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

        kubectl.delete_chi(chi)
Example #7
    def _setup(self):
        """
            Set Amigo to run.
        """

        # Set output directories
        output_dir = util.get_value(self.config, "reports_dir")
        util.print_to_stdout(
            "Setting up output directory at '{0}'".format(output_dir))
        util.create_dir(output_dir)

        # Set current report
        self.reports = util.get_full_path(
            util.get_value(self.config, "reports_dir"), util.get_date())
        util.create_dir(self.reports)
        util.print_to_stdout("Reports are being saved to '{0}'".format(
            self.reports))

        # Search for previous reports
        days_back = 30
        for day in range(1, days_back):
            self.previous_reports = util.get_full_path(
                util.get_value(self.config, "reports_dir"), util.get_date(day))
            if not util.is_path(self.previous_reports):
                util.print_to_stdout(
                    "No previous reports found at {0}'".format(
                        self.previous_reports))
            else:
                util.print_to_stdout(
                    "Previous report {0} found ({1} day(s) ago)".format(
                        self.previous_reports, day),
                    color="yellow")
                break

        # Create Database
        self.database = Database(self.database_path)
        util.create_dir(self.database_path)
        util.print_to_stdout("Database is being saved at '{0}'".format(
            self.database_path))

        # Violation report config
        results_dir = util.get_value(self.config, "results_dir")
        util.create_dir(results_dir)
        results_file = util.get_value(self.config, "results_log_file")
        self.results = util.get_full_path(results_dir, results_file)
        util.print_to_stdout("Reports will be saved at '{0}'".format(
            self.results))
Example #8
def test_metrics_exporter_reboot():
    def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10):
        with And(f"metrics-exporter /chi enpoint result should return {expect_result}"):
            for i in range(1, max_retries):
                # hit /metrics first to force a refresh of the monitored instances
                kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics",
                    ns=operator_namespace
                )
                # check /chi after the monitored instances have been refreshed
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/chi",
                    ns=operator_namespace
                )
                out = json.loads(out)
                if out == expect_result:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert out == expect_result, error()

    with Given("clickhouse-operator is installed"):
        kubectl.wait_field("pods", "-l app=clickhouse-operator", ".status.containerStatuses[*].ready", "true,true",
                           ns=settings.operator_namespace)
        assert kubectl.get_count("pod", ns='--all-namespaces', label="-l app=clickhouse-operator") > 0, error()

        out = kubectl.launch("get pods -l app=clickhouse-operator", ns=settings.operator_namespace).splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = settings.operator_namespace
        kubectl.delete_ns(kubectl.namespace)
        kubectl.create_ns(kubectl.namespace)
        check_monitoring_chi(operator_namespace, operator_pod, [])
        with And("created simple clickhouse installation"):
            config = util.get_full_path("../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml")
            kubectl.create_and_check(
                config=config,
                check={
                    "object_counts": {
                        "statefulset": 1,
                        "pod": 1,
                        "service": 2,
                    },
                    "do_not_delete": True,
                })
            expected_chi = [{
                "namespace": "test", "name": "simple-01",
                "hostnames": ["chi-simple-01-cluster-0-0.test.svc.cluster.local"]
            }]
            check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
            with When("reboot metrics exporter"):
                kubectl.launch(f"exec -n {operator_namespace} {operator_pod} -c metrics-exporter -- reboot")
                time.sleep(15)
                kubectl.wait_field("pods", "-l app=clickhouse-operator",
                                        ".status.containerStatuses[*].ready", "true,true",
                                   ns=settings.operator_namespace)
                with Then("check metrics exporter still contains chi objects"):
                    check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
                    kubectl.delete(config)
                    check_monitoring_chi(operator_namespace, operator_pod, [])
Example #9
def test_022(config="configs/test-022-broken-image.yaml"):
    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(config=config,
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                                 "chi_status": "InProgress",
                             })
    with When("ClickHouse image can not be retrieved"):
        kubectl.wait_field(
            "pod", "chi-test-022-broken-image-default-0-0-0",
            ".status.containerStatuses[0].state.waiting.reason",
            "ErrImagePull")
        kubectl.delete_chi(chi)
Example #10
def test_019(config="configs/test-019-retain-volume.yaml"):
    require_zookeeper()

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "do_not_delete": 1,
        })

    create_non_replicated_table = "create table t1 Engine = Log as select 1 as a"
    create_replicated_table = """
    create table t2 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    partition by tuple() order by a
    as select 1 as a""".replace('\r', '').replace('\n', '')

    with Given("ClickHouse has some data in place"):
        clickhouse.query(chi, sql=create_non_replicated_table)
        clickhouse.query(chi, sql=create_replicated_table)

    with When("CHI with retained volume is deleted"):
        pvc_count = kubectl.get_count("pvc")
        pv_count = kubectl.get_count("pv")

        kubectl.delete_chi(chi)

        with Then("PVC should be retained"):
            assert kubectl.get_count("pvc") == pvc_count
            assert kubectl.get_count("pv") == pv_count

    with When("Re-create CHI"):
        kubectl.create_and_check(
            config=config,
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

    with Then("PVC should be re-mounted"):
        with And("Non-replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t1")
            assert out == "1"
        with And("Replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t2")
            assert out == "1"

    kubectl.delete_chi(chi)
Example #11
    def _record_attribute_data_reports(self, attribute_item, attribute_data,
                                       project_name):
        """
            Save project attribute data to individual reports in disk.
            These reports are used for generating a quick diff report result.
            We use the symbol "@" to be able to split on it later, when reading
            the reports.
        """

        output_file = util.get_full_path(
            self.reports, project_name + "@" + attribute_item + '.json')

        util.save_to_json_file(attribute_data[0], output_file)
        util.print_to_stdout(
            "Resource data for {0} for project {1} saved to {2}".format(
                attribute_item, project_name, output_file),
            color="yellow")
Example #12
def test_ch_001(self):
    util.require_zookeeper()
    chit_data = manifest.get_chit_data(
        util.get_full_path("templates/tpl-clickhouse-19.11.yaml"))
    kubectl.launch(f"delete chit {chit_data['metadata']['name']}",
                   ns=settings.test_namespace)
    kubectl.create_and_check(
        "configs/test-ch-001-insert-quorum.yaml", {
            "apply_templates": {"templates/tpl-clickhouse-20.8.yaml"},
            "pod_count": 2,
            "do_not_delete": 1,
        })
    chi = manifest.get_chi_name(
        util.get_full_path("configs/test-ch-001-insert-quorum.yaml"))
    chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
    util.wait_clickhouse_cluster_ready(chi_data)

    host0 = "chi-test-ch-001-insert-quorum-default-0-0"
    host1 = "chi-test-ch-001-insert-quorum-default-0-1"

    create_table = """
    create table t1 on cluster default (a Int8, d Date default today())
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by d order by a 
    TTL d + interval 5 second
    SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')

    create_mv_table2 = """
    create table t2 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv_table3 = """
    create table t3 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
    create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"

    with Given("Tables t1, t2, t3 and MVs t1->t2, t1-t3 are created"):
        clickhouse.query(chi, create_table)
        clickhouse.query(chi, create_mv_table2)
        clickhouse.query(chi, create_mv_table3)

        clickhouse.query(chi, create_mv2)
        clickhouse.query(chi, create_mv3)

        with When("Add a row to an old partition"):
            clickhouse.query(chi,
                             "insert into t1(a,d) values(6, today()-1)",
                             host=host0)

        with When("Stop fetches for t1 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t1", host=host1)

            with Then("Wait 10 seconds and the data should be dropped by TTL"):
                time.sleep(10)
                out = clickhouse.query(chi,
                                       "select count() from t1 where a=6",
                                       host=host0)
                assert out == "0"

        with When("Resume fetches for t1 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t1",
                             host=host1)
            time.sleep(5)

            with Then("Inserts should resume"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(7)",
                                 host=host0)

        clickhouse.query(chi, "insert into t1(a) values(1)")

        with When("Stop fetches for t2 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t2", host=host1)

            with Then("Insert should fail since it can not reach the quorum"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(2)", host=host0)
                assert "Timeout while waiting for quorum" in out

        # kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
        # with Then("Corrupt data part in t2"):
        #    kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")

        with When("Resume fetches for t2 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t2",
                             host=host1)
            i = 0
            while "2" != clickhouse.query(
                    chi,
                    "select active_replicas from system.replicas where database='default' and table='t1'",
                    pod=host0) and i < 10:
                with Then("Not ready, wait 5 seconds"):
                    time.sleep(5)
                    i += 1

            with Then(
                    "Inserts should fail with an error regarding not satisfied quorum"
            ):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(3)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

            with And("Second insert of the same block should pass"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(3)",
                                 host=host0)

            with And("Insert of the new block should fail"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(4)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

            with And(
                    "Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"
            ):
                out = clickhouse.query_with_error(
                    chi,
                    "set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
                    host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

        out = clickhouse.query_with_error(
            chi,
            "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
        )
        print(out)
Example #13
def get_log_path(self):
    log_name = self.parser.get_default('logging', 'log_name', 'SHProject.log')
    log_dir = self.parser.get_default('logging', 'log_dir', 'log')
    return get_full_path(log_dir, log_name)
Example #14
def get_config_path():
    return get_full_path('utils', CONFIG_FILE_NAME)
Example #15
def __init__(self):
    self.mysql_creds = None
    self.rabbitmq_creds = None
    self._get_vcap_service()
    self.parser = DefaultConfigParser()
    self.parser.read(get_full_path('utils', CONFIG_FILE_NAME))
Example #16
def __init__(self):
    self.mysql_creds = None
    self.rabbitmq_creds = None
    self._get_vcap_service()
    self.parser = DefaultConfigParser()
    self.parser.read(get_full_path("utils", CONFIG_FILE_NAME))
Example #17
if main():
    with Module("main"):
        with Given(f"Clean namespace {settings.test_namespace}"):
            kubectl.delete_all_chi(settings.test_namespace)
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True)
            kubectl.create_ns(settings.test_namespace)

        with Given(
                f"clickhouse-operator version {settings.operator_version} is installed"
        ):
            if kubectl.get_count("pod",
                                 ns=settings.operator_namespace,
                                 label="-l app=clickhouse-operator") == 0:
                config = util.get_full_path(
                    '../deploy/operator/clickhouse-operator-install-template.yaml'
                )
                kubectl.apply(
                    ns=settings.operator_namespace,
                    config=f"<(cat {config} | "
                    f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
                    f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
                    f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"envsubst)",
                    validate=False)
            test_operator.set_operator_version(settings.operator_version)

        with Given(
                f"Install ClickHouse template {settings.clickhouse_template}"):
            kubectl.apply(util.get_full_path(settings.clickhouse_template),
                          settings.test_namespace)
Example #18
def get_config_path():
    return get_full_path("utils", CONFIG_FILE_NAME)
Example #19
def get_log_path(self):
    log_name = self.parser.get_default("logging", "log_name", "SHProject.log")
    log_dir = self.parser.get_default("logging", "log_dir", "log")
    return get_full_path(log_dir, log_name)
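
Note: the two-argument get_full_path(...) used in Examples #13-#16, #18 and #19 is not shown on this page. A minimal sketch of what such a helper might look like (an assumption for illustration, not the projects' actual implementation):

import os

def get_full_path(*parts):
    # Illustrative sketch only: join the given path components and return an
    # absolute path. The real util.get_full_path in these projects may instead
    # resolve paths relative to a package or module directory.
    return os.path.abspath(os.path.join(*parts))
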
Example #20
def test_014():
    require_zookeeper()

    create_table = """
    CREATE TABLE test_local(a Int8) 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    PARTITION BY tuple() 
    ORDER BY a
    """.replace('\r', '').replace('\n', '')

    config = "configs/test-014-replication-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(
        config=config,
        check={
            "apply_templates": {
                settings.clickhouse_template,
                "templates/tpl-persistent-volume-100Mi.yaml",
            },
            "object_counts": {
                "statefulset": 2,
                "pod": 2,
                "service": 3,
            },
            "do_not_delete": 1,
        })

    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = ['test_local', 'test_view', 'test_mv', 'a_view']
    with Given("Create schema objects"):
        clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW test_view as SELECT * from test_local",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW a_view as SELECT * from test_view",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE MATERIALIZED VIEW test_mv Engine = Log as SELECT * from test_local",
            host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE DICTIONARY test_dict (a Int8, b Int8) PRIMARY KEY a SOURCE(CLICKHOUSE(host 'localhost' port 9000 table 'test_local' user 'default')) LAYOUT(FLAT()) LIFETIME(0)",
            host=f"chi-{chi}-{cluster}-0-0")

    with Given(
            "Replicated table is created on a first replica and data is inserted"
    ):
        clickhouse.query(chi,
                         "INSERT INTO test_local values(1)",
                         host=f"chi-{chi}-{cluster}-0-0")
        with When("Table is created on the second replica"):
            clickhouse.query(chi,
                             create_table,
                             host=f"chi-{chi}-{cluster}-0-1")
            # Give some time for replication to catch up
            time.sleep(10)
            with Then("Data should be replicated"):
                out = clickhouse.query(chi,
                                       "SELECT a FROM test_local",
                                       host=f"chi-{chi}-{cluster}-0-1")
                assert out == "1"

    with When("Add one more replica"):
        kubectl.create_and_check(config="configs/test-014-replication-2.yaml",
                                 check={
                                     "pod_count": 3,
                                     "do_not_delete": 1,
                                 })
        # Give some time for replication to catch up
        time.sleep(10)

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Schema objects should be migrated to the new replica"):
            for obj in schema_objects:
                out = clickhouse.query(
                    chi,
                    f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                    host=f"chi-{chi}-{cluster}-0-2")
                assert out == "1"
            # Check dictionary
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.dictionaries WHERE name = 'test_dict'",
                host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

        with And("Replicated table should have the data"):
            out = clickhouse.query(chi,
                                   "SELECT a FROM test_local",
                                   host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

    with When("Remove replica"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "pod_count": 1,
                                     "do_not_delete": 1,
                                 })

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Replica needs to be removed from the Zookeeper as well"):
            out = clickhouse.query(
                chi,
                "SELECT count() FROM system.replicas WHERE table='test_local'")
            assert out == "1"

    with When("Restart Zookeeper pod"):
        with Then("Delete Zookeeper pod"):
            kubectl.launch("delete pod zookeeper-0")
            time.sleep(1)

        with Then(
                "Insert into the table while there is no Zookeeper -- table should be in readonly mode"
        ):
            out = clickhouse.query_with_error(
                chi, "INSERT INTO test_local values(2)")
            assert "Table is in readonly mode" in out

        with Then("Wait for Zookeeper pod to come back"):
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")

        with Then(
                "Wait for ClickHouse to reconnect to Zookeeper and switch to read-write mode"
        ):
            time.sleep(30)
        # with Then("Restart clickhouse pods"):
        #    kubectl("delete pod chi-test-014-replication-default-0-0-0")
        #    kubectl("delete pod chi-test-014-replication-default-0-1-0")

        with Then("Table should be back to normal"):
            clickhouse.query(chi, "INSERT INTO test_local values(3)")

    kubectl.delete_chi("test-014-replication")
Example #21
def test_013():
    config = "configs/test-013-add-shards-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(config=config,
                             check={
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                 },
                                 "object_counts": {
                                     "statefulset": 1,
                                     "pod": 1,
                                     "service": 2,
                                 },
                                 "do_not_delete": 1,
                             })
    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = [
        'test_local',
        'test_distr',
        'events-distr',
    ]
    with Then("Create local and distributed tables"):
        clickhouse.query(
            chi,
            "CREATE TABLE test_local Engine = Log as SELECT * FROM system.one")
        clickhouse.query(
            chi,
            "CREATE TABLE test_distr as test_local Engine = Distributed('default', default, test_local)"
        )
        clickhouse.query(chi, "CREATE DATABASE \\\"test-db\\\"")
        clickhouse.query(
            chi,
            "CREATE TABLE \\\"test-db\\\".\\\"events-distr\\\" as system.events "
            "ENGINE = Distributed('all-sharded', system, events)")

    with Then("Add shards"):
        kubectl.create_and_check(config="configs/test-013-add-shards-2.yaml",
                                 check={
                                     "object_counts": {
                                         "statefulset": 3,
                                         "pod": 3,
                                         "service": 4,
                                     },
                                     "do_not_delete": 1,
                                 })

    # Give some time for replication to catch up
    time.sleep(10)

    with Then("Unaffected pod should not be restarted"):
        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

    with And("Schema objects should be migrated to new shards"):
        for obj in schema_objects:
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-1-0")
            assert out == "1"
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-2-0")
            assert out == "1"

    with When("Remove shards"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        time.sleep(10)
        with Then("Unaffected pod should not be restarted"):
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi(chi)
Example #22
from testflows.core import Given, main, Scenario, Module, Fail, Error

if main():
    with Module("main"):
        with Given(f"Clean namespace {settings.test_namespace}"):
            kubectl.delete_all_chi(settings.test_namespace)
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True)
            kubectl.create_ns(settings.test_namespace)

        with Given(
                f"clickhouse-operator version {settings.operator_version} is installed"
        ):
            if kubectl.get_count("pod",
                                 ns=settings.operator_namespace,
                                 label="-l app=clickhouse-operator") == 0:
                config = util.get_full_path(
                    settings.clickhouse_operator_install)
                kubectl.apply(
                    ns=settings.operator_namespace,
                    config=f"<(cat {config} | "
                    f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
                    f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
                    f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"envsubst)",
                    validate=False)
            util.set_operator_version(settings.operator_version)

        with Given(
                f"Install ClickHouse template {settings.clickhouse_template}"):
            kubectl.apply(util.get_full_path(settings.clickhouse_template),
                          settings.test_namespace)
Example #23
def test_021(config="configs/test-021-rescale-volume-01.yaml"):
    with Given("Default storage class is expandable"):
        default_storage_class = kubectl.get_default_storage_class()
        assert default_storage_class is not None
        assert len(default_storage_class) > 0
        allow_volume_expansion = kubectl.get_field("storageclass",
                                                   default_storage_class,
                                                   ".allowVolumeExpansion")
        if allow_volume_expansion != "true":
            kubectl.launch(
                f"patch storageclass {default_storage_class} -p '{{\"allowVolumeExpansion\":true}}'"
            )

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(config=config,
                             check={
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })

    with Then("Storage size should be 100Mi"):
        size = kubectl.get_pvc_size(
            "disk1-chi-test-021-rescale-volume-simple-0-0-0")
        assert size == "100Mi"

    with When("Re-scale volume configuration to 200Mb"):
        kubectl.create_and_check(
            config="configs/test-021-rescale-volume-02-enlarge-disk.yaml",
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

        with Then("Storage size should be 200Mi"):
            size = kubectl.get_pvc_size(
                "disk1-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "200Mi"

    with When("Add second disk 50Mi"):
        kubectl.create_and_check(
            config="configs/test-021-rescale-volume-03-add-disk.yaml",
            check={
                "pod_count": 1,
                "pod_volumes": {
                    "/var/lib/clickhouse",
                    "/var/lib/clickhouse2",
                },
                "do_not_delete": 1,
            })

        with Then("There should be two PVC"):
            size = kubectl.get_pvc_size(
                "disk1-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "200Mi"
            size = kubectl.get_pvc_size(
                "disk2-chi-test-021-rescale-volume-simple-0-0-0")
            assert size == "50Mi"

        with And("There should be two disks recognized by ClickHouse"):
            # ClickHouse needs some time to mount the new volume (avoid race conditions)
            time.sleep(120)
            out = clickhouse.query(chi, "SELECT count() FROM system.disks")
            print("SELECT count() FROM system.disks RETURNED:")
            print(out)
            assert out == "2"

    kubectl.delete_chi(chi)