def test_longest_running_query(self):
    long_running_pod, long_running_svc, _, _ = alerts.random_pod_choice_for_callbacks(
        chi)
    # the alert triggers after 600s; add 2*30s to cover two Prometheus scrape intervals
    clickhouse.query(
        chi["metadata"]["name"],
        "SELECT now(),sleepEachRow(1),number FROM system.numbers LIMIT 660",
        host=long_running_svc,
        timeout=670)
    with Then("check ClickHouseLongestRunningQuery firing"):
        fired = alerts.wait_alert_state("ClickHouseLongestRunningQuery",
                                        "firing",
                                        True,
                                        labels={"hostname": long_running_svc},
                                        time_range='30s')
        assert fired, error(
            "can't get ClickHouseLongestRunningQuery alert in firing state")
    with Then("check ClickHouseLongestRunningQuery gone away"):
        resolved = alerts.wait_alert_state(
            "ClickHouseLongestRunningQuery",
            "firing",
            False,
            labels={"hostname": long_running_svc})
        assert resolved, error(
            "can't check ClickHouseLongestRunningQuery alert is gone away")
Example #2
def test_017():
    kubectl.create_and_check(config="configs/test-017-multi-version.yaml",
                             check={
                                 "pod_count": 4,
                                 "do_not_delete": 1,
                             })
    chi = "test-017-multi-version"
    queries = [
        "CREATE TABLE test_max (epoch Int32, offset SimpleAggregateFunction(max, Int64)) ENGINE = AggregatingMergeTree() ORDER BY epoch",
        "insert into test_max select 0, 3650487030+number from numbers(5) settings max_block_size=1",
        "insert into test_max select 0, 5898217176+number from numbers(5)",
        "insert into test_max select 0, 5898217176+number from numbers(10) settings max_block_size=1",
        "OPTIMIZE TABLE test_max FINAL"
    ]

    for q in queries:
        print(f"{q}")
    test_query = "select min(offset), max(offset) from test_max"
    print(f"{test_query}")

    for shard in range(4):
        host = f"chi-{chi}-default-{shard}-0"
        for q in queries:
            clickhouse.query(chi, host=host, sql=q)
        out = clickhouse.query(chi, host=host, sql=test_query)
        ver = clickhouse.query(chi, host=host, sql="select version()")

        print(f"version: {ver}, result: {out}")

    kubectl.delete_chi(chi)
def test_replicas_max_abosulute_delay():
    stop_replica_pod, stop_replica_svc, insert_pod, insert_svc = random_pod_choice_for_callbacks()
    create_table_on_cluster('all-replicated', 'default.test_repl',
                            '(event_time DateTime, test UInt64) ENGINE ReplicatedMergeTree(\'/clickhouse/tables/{installation}-{shard}/{uuid}/test_repl\', \'{replica}\') ORDER BY tuple()')
    prometheus_scrape_interval = 15

    def restart_clickhouse_and_insert_to_replicated_table():
        with When(f"stop replica fetches on {stop_replica_svc}"):
            sql = "SYSTEM STOP FETCHES default.test_repl"
            kubectl.launch(
                f"exec -n {kubectl.namespace} {stop_replica_pod} -c clickhouse -- clickhouse-client -q \"{sql}\"",
                ok_to_fail=True,
            )
            sql = "INSERT INTO default.test_repl SELECT now(), number FROM numbers(100000)"
            kubectl.launch(
                f"exec -n {kubectl.namespace} {insert_pod} -c clickhouse -- clickhouse-client -q \"{sql}\"",
            )

    with Then("check ClickHouseReplicasMaxAbsoluteDelay firing"):
        fired = wait_alert_state("ClickHouseReplicasMaxAbsoluteDelay", "firing", True, labels={"hostname": stop_replica_svc},
                                 time_range='60s', sleep_time=prometheus_scrape_interval * 2,
                                 callback=restart_clickhouse_and_insert_to_replicated_table)
        assert fired, error("can't get ClickHouseReplicasMaxAbsoluteDelay alert in firing state")

    clickhouse.query(
        chi["metadata"]["name"],
        "SYSTEM START FETCHES; SYSTEM RESTART REPLICAS; SYSTEM SYNC REPLICA default.test_repl",
        host=stop_replica_svc, timeout=240
    )
    with Then("check ClickHouseReplicasMaxAbsoluteDelay gone away"):
        resolved = wait_alert_state("ClickHouseReplicasMaxAbsoluteDelay", "firing", False, labels={"hostname": stop_replica_svc})
        assert resolved, error("can't check ClickHouseReplicasMaxAbsoluteDelay alert is gone away")

    drop_table_on_cluster('all-replicated', 'default.test_repl')
def create_distributed_table_on_cluster(cluster_name='all-sharded', distr_table='default.test_distr', local_table='default.test',
                                        fields_definition='(event_time DateTime, test UInt64)',
                                        local_engine='ENGINE MergeTree() ORDER BY tuple()',
                                        distr_engine='ENGINE Distributed("all-sharded",default, test, test)'):
    create_table_on_cluster(cluster_name, local_table, fields_definition + ' ' + local_engine)
    create_distr_sql = f'CREATE TABLE {distr_table} ON CLUSTER \\\"{cluster_name}\\\" {fields_definition} {distr_engine}'
    clickhouse.query(chi["metadata"]["name"], create_distr_sql, timeout=120)
    def insert_many_parts_to_clickhouse():
        stop_merges = "SYSTEM STOP MERGES default.test;"
        min_block = "SET max_block_size=1; SET max_insert_block_size=1; SET min_insert_block_size_rows=1;"
        with When(f"Insert to MergeTree table {parts_limits} parts"):
            r = parts_limits
            sql = stop_merges + min_block + f"INSERT INTO default.test(event_time, test) SELECT now(),number FROM system.numbers LIMIT {r};"
            clickhouse.query(chi_name,
                             sql,
                             host=selected_svc,
                             ns=kubectl.namespace)

            # @TODO only one query will be needed once https://github.com/ClickHouse/ClickHouse/issues/11384 is resolved and we switch to 21.3+
            sql = min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;"
            clickhouse.query_with_error(chi_name,
                                        sql,
                                        host=selected_svc,
                                        ns=kubectl.namespace)
            with Then(
                    f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"
            ):
                time.sleep(prometheus_scrape_interval * 2)

            sql = min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;"
            clickhouse.query_with_error(chi_name,
                                        sql,
                                        host=selected_svc,
                                        ns=kubectl.namespace)
Example #6
def test_015():
    kubectl.create_and_check(config="configs/test-015-host-network.yaml",
                             check={
                                 "pod_count": 2,
                                 "do_not_delete": 1,
                             })

    time.sleep(30)

    with Then("Query from one server to another one should work"):
        out = clickhouse.query(
            "test-015-host-network",
            host="chi-test-015-host-network-default-0-0",
            port="10000",
            sql=
            "SELECT * FROM remote('chi-test-015-host-network-default-0-1', system.one)"
        )
        print("remote out=")
        print(out)

    with Then("Distributed query should work"):
        out = clickhouse.query(
            "test-015-host-network",
            host="chi-test-015-host-network-default-0-0",
            port="10000",
            sql=
            "SELECT count() FROM cluster('all-sharded', system.one) settings receive_timeout=10"
        )
        print("cluster out=")
        print(out)
        assert out == "2"

    kubectl.delete_chi("test-015-host-network")
Example #7
def prepare_table_for_backup(backup_pod, rows=1000):
    backup_name = f'test_backup_{time.strftime("%Y-%m-%d_%H%M%S")}'
    clickhouse.query(
        chi['metadata']['name'],
        "CREATE TABLE IF NOT EXISTS default.test_backup ON CLUSTER 'all-sharded' (i UInt64) ENGINE MergeTree() ORDER BY tuple();"
        f"INSERT INTO default.test_backup SELECT number FROM numbers({rows})",
        pod=backup_pod)
    return backup_name
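For context, here is a hedged sketch of how this helper is typically combined with the clickhouse-backup REST API on port 7171, reusing exec_on_backup_container and wait_backup_command_status exactly as they appear in the test_backup_size example further down:

def create_backup_sketch(backup_pod, rows=1000):
    # Prepare the table, then ask the clickhouse-backup sidecar to create a backup
    # and wait until it reports success (helpers as used in test_backup_size below).
    backup_name = prepare_table_for_backup(backup_pod, rows=rows)
    exec_on_backup_container(
        backup_pod,
        f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"')
    wait_backup_command_status(backup_pod, f'create {backup_name}',
                               expected_status='success')
    return backup_name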
def run_queries_with_priority():
    sql = ""
    for i in range(50):
        sql += f"SET priority={i % 20};SELECT uniq(number) FROM numbers(20000000):"
    cmd = f"echo \\\"{sql} SELECT 1\\\" | xargs -i'{{}}' --no-run-if-empty -d ':' -P 20 clickhouse-client --time -m -n -q \\\"{{}}\\\""
    kubectl.launch(f"exec {priority_pod} -- bash -c \"{cmd}\"", timeout=120)
    clickhouse.query(
        chi["metadata"]["name"],
        "SELECT event_time, CurrentMetric_QueryPreempted FROM system.metric_log WHERE CurrentMetric_QueryPreempted > 0",
        host=priority_svc,
    )
def attach_all_parts():
    detached_parts = clickhouse.query(
        chi["metadata"]["name"],
        "SELECT name FROM system.detached_parts WHERE database='default' AND table='test' AND reason=''",
        pod=detached_pod)
    all_parts = ""
    for part in detached_parts.splitlines():
        all_parts += f"ALTER TABLE default.test ATTACH PART '{part}';"
    if all_parts.strip() != "":
        clickhouse.query(chi["metadata"]["name"],
                         all_parts,
                         pod=detached_pod)
def create_part_and_detach():
    clickhouse.query(
        chi["metadata"]["name"],
        "INSERT INTO default.test SELECT now(), number FROM numbers(100)",
        pod=detached_pod)
    part_name = clickhouse.query(
        chi["metadata"]["name"],
        sql=
        "SELECT name FROM system.parts WHERE database='default' AND table='test' ORDER BY modification_time DESC LIMIT 1",
        pod=detached_pod)
    clickhouse.query(chi["metadata"]["name"],
                     f"ALTER TABLE default.test DETACH PART '{part_name}'",
                     pod=detached_pod)
def test_020(config="configs/test-020-multi-volume.yaml"):
    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "pod_volumes": {
                "/var/lib/clickhouse",
                "/var/lib/clickhouse2",
            },
            "do_not_delete": 1,
        })

    with When("Create a table and insert 1 row"):
        clickhouse.query(chi, "create table test_disks(a Int8) Engine = MergeTree() order by a")
        clickhouse.query(chi, "insert into test_disks values (1)")

        with Then("Data should be placed on default disk"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'default'

    with When("alter table test_disks move partition tuple() to disk 'disk2'"):
        clickhouse.query(chi, "alter table test_disks move partition tuple() to disk 'disk2'")

        with Then("Data should be placed on disk2"):
            out = clickhouse.query(chi, "select disk_name from system.parts where table='test_disks'")
            assert out == 'disk2'

    kubectl.delete_chi(chi)
Example #12
def test_backup_size(self):
    decrease_pod, _, increase_pod, _ = alerts.random_pod_choice_for_callbacks(
        chi)

    backup_cases = {
        decrease_pod: {
            'rows': (10000, 1000),
            'decrease': True,
        },
        increase_pod: {
            'rows': (1000, 10000),
            'decrease': False,
        },
    }
    for backup_pod in backup_cases:
        decrease = backup_cases[backup_pod]['decrease']
        for backup_rows in backup_cases[backup_pod]['rows']:
            backup_name = prepare_table_for_backup(backup_pod,
                                                   rows=backup_rows)
            exec_on_backup_container(
                backup_pod,
                f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"'
            )
            wait_backup_command_status(backup_pod,
                                       f'create {backup_name}',
                                       expected_status='success')
            if decrease:
                clickhouse.query(chi['metadata']['name'],
                                 f"TRUNCATE TABLE default.test_backup",
                                 pod=backup_pod)
            time.sleep(15)
        fired = alerts.wait_alert_state(
            "ClickHouseBackupSizeChanged",
            "firing",
            expected_state=True,
            sleep_time=settings.prometheus_scrape_interval,
            labels={"pod_name": backup_pod},
            time_range='60s')
        assert fired, error(
            f"can't get ClickHouseBackupSizeChanged alert in firing state, decrease={decrease}"
        )

        with Then("check ClickHouseBackupSizeChanged gone away"):
            resolved = alerts.wait_alert_state("ClickHouseBackupSizeChanged",
                                               "firing",
                                               expected_state=False,
                                               labels={"pod_name": backup_pod})
            assert resolved, error(
                f"can't get ClickHouseBackupSizeChanged alert is gone away, decrease={decrease}"
            )
    def reboot_clickhouse_and_distributed_exection():
        # we need ~70 delayed files to catch the alert
        insert_sql = 'INSERT INTO default.test_distr(event_time, test) SELECT now(), number FROM system.numbers LIMIT 10000'
        select_sql = 'SELECT count() FROM default.test_distr'
        with Then("reboot clickhouse-server pod"):
            kubectl.launch(
                f"exec -n {kubectl.namespace} {restarted_pod} -c clickhouse -- kill 1",
                ok_to_fail=True,
            )
            with And("Insert to distributed table"):
                clickhouse.query(chi["metadata"]["name"], insert_sql, host=delayed_pod, ns=kubectl.namespace)

            with And("Select from distributed table"):
                clickhouse.query_with_error(chi["metadata"]["name"], select_sql, host=delayed_pod,
                                            ns=kubectl.namespace)
Example #14
File: api.py  Project: jkee/mini-bi
def table():
    table = request.args['table']
    fields = clickhouse.query('DESC TABLE ' + table)
    # first column: name
    # second: type
    res = {'fields': [[f[0], f[1]] for f in fields]}
    return jsonify(res)
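A hedged client-side sketch of calling the endpoint above, assuming table() is registered as a Flask route at /table; the base URL and route name are assumptions:

import requests


def fetch_table_fields(base_url, table_name):
    """Return the [name, type] pairs exposed by the /table endpoint above."""
    resp = requests.get(f"{base_url}/table", params={"table": table_name}, timeout=10)
    resp.raise_for_status()
    return resp.json()["fields"]


# e.g. fetch_table_fields("http://localhost:5000", "system.one") -> [["dummy", "UInt8"]]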
Example #15
def drop_table():
    sql = "DROP TABLE IF EXISTS {db}.{name}"\
        .format(db=settings.CLICKHOUSE['db_export'], name=settings.BASE_TABLE)
    res = clickhouse.query(sql)

    if res['returncode'] != 0:
        logger.error('There is an error: %s', repr(res['err']))
        sys.exit(-1)
Example #16
def check_table():
    sql = """EXISTS TABLE {db}.{name}"""\
            .format(db=settings.CLICKHOUSE['db_export'], name=settings.BASE_TABLE)
    res = clickhouse.query(sql)
    if res['returncode'] == 0:
        if str(res['out'].splitlines()).strip('[]') == "'1'":
            return True
        else:
            return False
    else:
        logger.error('There is an error: %s', repr(res['err']))
        sys.exit(-1)
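The drop_table/check_table/get_tables/create_table examples expect clickhouse.query() to return a dict with 'returncode', 'out' and 'err' keys, i.e. a thin wrapper around the clickhouse-client binary. A minimal sketch of such a wrapper follows; the binary name, host and port flags are assumptions:

import subprocess


def query_via_cli(sql, host="localhost", port=9000):
    # Run the statement through clickhouse-client and return the dict shape
    # these examples rely on: {'returncode': ..., 'out': ..., 'err': ...}.
    proc = subprocess.run(
        ["clickhouse-client", "--host", host, "--port", str(port), "-q", sql],
        capture_output=True, text=True)
    return {"returncode": proc.returncode, "out": proc.stdout, "err": proc.stderr}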
Example #17
def test_ch_002(self):
    kubectl.create_and_check(
        "configs/test-ch-002-row-level.yaml", {
            "apply_templates": {"templates/tpl-clickhouse-20.3.yaml"},
            "do_not_delete": 1,
        })

    chi = "test-ch-002-row-level"
    create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""

    with When("Create test table"):
        clickhouse.query(chi, create_table)

    with And("Insert some data"):
        clickhouse.query(
            chi,
            "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
        )

    with Then(
            "Make a query as different users. It should be restricted to the corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select user from test", user=user)
            assert out == user

    with Then(
            "Make a count() query as different users. It should be restricted to the corresponding team by row-level security"
    ):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select count() from test", user=user)
            assert out == "1"

    kubectl.delete_chi(chi)
Example #18
def get_tables():
    """Get available table names from clickhouse that contains log data"""
    sql = "select name from system.tables where database='{db}' and name like 'log_%_%_%'"\
        .format(db=settings.CLICKHOUSE['db_export'])
    res = clickhouse.query(sql)
    if res['returncode'] == 0:
        tables = res['out'].splitlines()
        tables.sort()
        # Do not take the most recent table, because it can be still in use
        return tables[:-1]
    else:
        logger.error('There is an error: %s', repr(res['err']))
        sys.exit(-1)
Example #19
def create_table():
    sql = """CREATE TABLE {db}.{name} (
        date Date,
        type_id UInt16,
		item_id UInt32,
        tube_id UInt32,
        country String,
        num_views AggregateFunction(count)) 
        ENGINE = AggregatingMergeTree(date, (date, type_id, tube_id, country, item_id), 8192)"""\
            .format(db=settings.CLICKHOUSE['db_export'], name=settings.BASE_TABLE)
    res = clickhouse.query(sql)
    if res['returncode'] != 0 and res['returncode'] != 57:  # 57 = TABLE_ALREADY_EXISTS
        logger.error('There is an error: %s', repr(res['err']))
        sys.exit(-1)
Example #20
def wait_clickhouse_cluster_ready(chi):
    with Given("All expected pods present in system.clusters"):
        all_pods_ready = False
        while all_pods_ready is False:
            all_pods_ready = True
            for pod in chi['status']['pods']:
                cluster_response = clickhouse.query(
                    chi["metadata"]["name"],
                    "SYSTEM RELOAD CONFIG; SELECT host_name FROM system.clusters WHERE cluster='all-sharded'",
                    pod=pod
                )
                for host in chi['status']['fqdns']:
                    svc_short_name = host.replace(f'.{settings.test_namespace}.svc.cluster.local', '')
                    if svc_short_name not in cluster_response:
                        with Then("Not ready, sleep 5 seconds"):
                            all_pods_ready = False
                            time.sleep(5)
def test_distributed_files_to_insert():
    delayed_pod, delayed_svc, restarted_pod, restarted_svc = random_pod_choice_for_callbacks()
    create_distributed_table_on_cluster()

    insert_sql = 'INSERT INTO default.test_distr(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1000'
    clickhouse.query(
        chi["metadata"]["name"], 'SYSTEM STOP DISTRIBUTED SENDS default.test_distr',
        pod=delayed_pod, ns=kubectl.namespace
    )

    files_to_insert_from_metrics = 0
    files_to_insert_from_disk = 0
    tries = 0
    # we need more than 50 delayed files to trigger the alert
    while files_to_insert_from_disk <= 55 and files_to_insert_from_metrics <= 55 and tries < 500:
        kubectl.launch(
            f"exec -n {kubectl.namespace} {restarted_pod} -c clickhouse -- kill 1",
            ok_to_fail=True,
        )
        clickhouse.query(chi["metadata"]["name"], insert_sql, pod=delayed_pod, host=delayed_pod, ns=kubectl.namespace)
        files_to_insert_from_metrics = clickhouse.query(
            chi["metadata"]["name"], "SELECT value FROM system.metrics WHERE metric='DistributedFilesToInsert'",
            pod=delayed_pod, ns=kubectl.namespace
        )
        files_to_insert_from_metrics = int(files_to_insert_from_metrics)

        files_to_insert_from_disk = int(kubectl.launch(
            f"exec -n {kubectl.namespace} {delayed_pod} -c clickhouse -- bash -c 'ls -la /var/lib/clickhouse/data/default/test_distr/*/*.bin 2>/dev/null | wc -l'",
            ok_to_fail=False,
        ))
        tries += 1  # advance the loop's safety counter

    with When("reboot clickhouse-server pod"):
        fired = wait_alert_state("ClickHouseDistributedFilesToInsertHigh", "firing", True,
                                 labels={"hostname": delayed_svc, "chi": chi["metadata"]["name"]})
        assert fired, error("can't get ClickHouseDistributedFilesToInsertHigh alert in firing state")

    kubectl.wait_pod_status(restarted_pod, "Running", ns=kubectl.namespace)

    clickhouse.query(
        chi["metadata"]["name"], 'SYSTEM START DISTRIBUTED SENDS default.test_distr',
        pod=delayed_pod, ns=kubectl.namespace
    )

    with Then("check ClickHouseDistributedFilesToInsertHigh gone away"):
        resolved = wait_alert_state("ClickHouseDistributedFilesToInsertHigh", "firing", False, labels={"hostname": delayed_svc})
        assert resolved, error("can't check ClickHouseDistributedFilesToInsertHigh alert is gone away")

    drop_distributed_table_on_cluster()
Example #22
def test_zookeeper_rescale(self):
    with When('create replicated table'):
        clickhouse.create_table_on_cluster(
            chi, 'all-sharded', 'default.zk_repl',
            '(id UInt64) ENGINE=ReplicatedMergeTree(\'/clickhouse/tables/default.zk_repl/{shard}\',\'{replica}\') ORDER BY (id)'
        )
    with Then('insert data x1'):
        clickhouse.query(
            chi['metadata']['name'],
            'INSERT INTO default.zk_repl SELECT number FROM numbers(1000)',
            pod="chi-test-cluster-for-zk-default-0-0-0")
    with Then('scale up zookeeper to 3 nodes'):
        util.require_zookeeper('zookeeper-3-nodes-1GB-for-tests-only.yaml',
                               force_install=True)
        kubectl.wait_pod_status('zookeeper-0', 'Running',
                                settings.test_namespace)
        kubectl.wait_pod_status('zookeeper-1', 'Running',
                                settings.test_namespace)
        kubectl.wait_pod_status('zookeeper-2', 'Running',
                                settings.test_namespace)

    with Then('insert data x2'):
        clickhouse.query(
            chi['metadata']['name'],
            'INSERT INTO default.zk_repl SELECT number*2 FROM numbers(1000)',
            pod="chi-test-cluster-for-zk-default-0-1-0")

    with Then('scale down zookeeper to 1 nodes'):
        util.require_zookeeper('zookeeper-1-node-1GB-for-tests-only.yaml',
                               force_install=True)
        kubectl.wait_pod_status('zookeeper-0', 'Running',
                                settings.test_namespace)

    with Then('insert data x2'):
        clickhouse.query(
            chi['metadata']['name'],
            'INSERT INTO default.zk_repl SELECT number*3 FROM numbers(1000)',
            pod="chi-test-cluster-for-zk-default-0-0-0")

    assert clickhouse.query(chi['metadata']['name'],
                            'SELECT count() FROM default.zk_repl',
                            pod="chi-test-cluster-for-zk-default-0-1-0"
                            ) == '3000', "Invalid rows after 3x1000 inserts"

    clickhouse.drop_table_on_cluster(chi, 'all-sharded', 'default.zk_repl')
def test_019(config="configs/test-019-retain-volume.yaml"):
    require_zookeeper()

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "do_not_delete": 1,
        })

    create_non_replicated_table = "create table t1 Engine = Log as select 1 as a"
    create_replicated_table = """
    create table t2 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    partition by tuple() order by a
    as select 1 as a""".replace('\r', '').replace('\n', '')

    with Given("ClickHouse has some data in place"):
        clickhouse.query(chi, sql=create_non_replicated_table)
        clickhouse.query(chi, sql=create_replicated_table)

    with When("CHI with retained volume is deleted"):
        pvc_count = kubectl.get_count("pvc")
        pv_count = kubectl.get_count("pv")

        kubectl.delete_chi(chi)

        with Then("PVC should be retained"):
            assert kubectl.get_count("pvc") == pvc_count
            assert kubectl.get_count("pv") == pv_count

    with When("Re-create CHI"):
        kubectl.create_and_check(
            config=config,
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

    with Then("PVC should be re-mounted"):
        with And("Non-replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t1")
            assert out == "1"
        with And("Replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t2")
            assert out == "1"

    kubectl.delete_chi(chi)
def test_delayed_and_rejected_insert_and_max_part_count_for_partition_and_low_inserted_rows_per_query():
    create_table_on_cluster()
    delayed_pod, delayed_svc, rejected_pod, rejected_svc = random_pod_choice_for_callbacks()

    prometheus_scrape_interval = 15
    # default values in system.merge_tree_settings
    parts_to_throw_insert = 300
    parts_to_delay_insert = 150
    chi_name = chi["metadata"]["name"]

    parts_limits = parts_to_delay_insert
    selected_svc = delayed_svc

    def insert_many_parts_to_clickhouse():
        stop_merges = "SYSTEM STOP MERGES default.test;"
        min_block = "SET max_block_size=1; SET max_insert_block_size=1; SET min_insert_block_size_rows=1;"
        with When(f"Insert to MergeTree table {parts_limits} parts"):
            r = parts_limits
            sql = stop_merges + min_block + \
                  "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT %d;" % r
            clickhouse.query(chi_name, sql, host=selected_svc, ns=kubectl.namespace)

            # @TODO only one query will be needed once https://github.com/ClickHouse/ClickHouse/issues/11384 is resolved
            sql = min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;"
            clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=kubectl.namespace)
            with And(f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"):
                time.sleep(prometheus_scrape_interval * 2)

            sql = min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;"
            clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=kubectl.namespace)

    insert_many_parts_to_clickhouse()
    with Then("check ClickHouseDelayedInsertThrottling firing"):
        fired = wait_alert_state("ClickHouseDelayedInsertThrottling", "firing", True, labels={"hostname": delayed_svc}, time_range="30s",
                                 sleep_time=5)
        assert fired, error("can't get ClickHouseDelayedInsertThrottling alert in firing state")
    with Then("check ClickHouseMaxPartCountForPartition firing"):
        fired = wait_alert_state("ClickHouseMaxPartCountForPartition", "firing", True, labels={"hostname": delayed_svc}, time_range="45s",
                                 sleep_time=5)
        assert fired, error("can't get ClickHouseMaxPartCountForPartition alert in firing state")
    with Then("check ClickHouseLowInsertedRowsPerQuery firing"):
        fired = wait_alert_state("ClickHouseLowInsertedRowsPerQuery", "firing", True, labels={"hostname": delayed_svc}, time_range="60s",
                                 sleep_time=5)
        assert fired, error("can't get ClickHouseLowInsertedRowsPerQuery alert in firing state")

    clickhouse.query(chi_name, "SYSTEM START MERGES default.test", host=selected_svc, ns=kubectl.namespace)

    with Then("check ClickHouseDelayedInsertThrottling gone away"):
        resolved = wait_alert_state("ClickHouseDelayedInsertThrottling", "firing", False, labels={"hostname": delayed_svc}, sleep_time=5)
        assert resolved, error("can't check ClickHouseDelayedInsertThrottling alert is gone away")
    with Then("check ClickHouseMaxPartCountForPartition gone away"):
        resolved = wait_alert_state("ClickHouseMaxPartCountForPartition", "firing", False, labels={"hostname": delayed_svc}, sleep_time=5)
        assert resolved, error("can't check ClickHouseMaxPartCountForPartition alert is gone away")
    with Then("check ClickHouseLowInsertedRowsPerQuery gone away"):
        resolved = wait_alert_state("ClickHouseLowInsertedRowsPerQuery", "firing", False, labels={"hostname": delayed_svc}, sleep_time=5)
        assert resolved, error("can't check ClickHouseLowInsertedRowsPerQuery alert is gone away")

    parts_limits = parts_to_throw_insert
    selected_svc = rejected_svc
    insert_many_parts_to_clickhouse()
    with Then("check ClickHouseRejectedInsert firing"):
        fired = wait_alert_state("ClickHouseRejectedInsert", "firing", True, labels={"hostname": rejected_svc}, time_range="30s",
                                 sleep_time=5)
        assert fired, error("can't get ClickHouseRejectedInsert alert in firing state")

    with Then("check ClickHouseRejectedInsert gone away"):
        resolved = wait_alert_state("ClickHouseRejectedInsert", "firing", False, labels={"hostname": rejected_svc}, sleep_time=5)
        assert resolved, error("can't check ClickHouseRejectedInsert alert is gone away")

    clickhouse.query(chi_name, "SYSTEM START MERGES default.test", host=selected_svc, ns=kubectl.namespace)
    drop_table_on_cluster()
Example #25
def test_013():
    config = "configs/test-013-add-shards-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(config=config,
                             check={
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                 },
                                 "object_counts": {
                                     "statefulset": 1,
                                     "pod": 1,
                                     "service": 2,
                                 },
                                 "do_not_delete": 1,
                             })
    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = [
        'test_local',
        'test_distr',
        'events-distr',
    ]
    with Then("Create local and distributed tables"):
        clickhouse.query(
            chi,
            "CREATE TABLE test_local Engine = Log as SELECT * FROM system.one")
        clickhouse.query(
            chi,
            "CREATE TABLE test_distr as test_local Engine = Distributed('default', default, test_local)"
        )
        clickhouse.query(chi, "CREATE DATABASE \\\"test-db\\\"")
        clickhouse.query(
            chi,
            "CREATE TABLE \\\"test-db\\\".\\\"events-distr\\\" as system.events "
            "ENGINE = Distributed('all-sharded', system, events)")

    with Then("Add shards"):
        kubectl.create_and_check(config="configs/test-013-add-shards-2.yaml",
                                 check={
                                     "object_counts": {
                                         "statefulset": 3,
                                         "pod": 3,
                                         "service": 4,
                                     },
                                     "do_not_delete": 1,
                                 })

    # Give some time for replication to catch up
    time.sleep(10)

    with Then("Unaffected pod should not be restarted"):
        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

    with And("Schema objects should be migrated to new shards"):
        for obj in schema_objects:
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-1-0")
            assert out == "1"
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                host=f"chi-{chi}-{cluster}-2-0")
            assert out == "1"

    with When("Remove shards"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "object_counts": {
                                         "statefulset": 1,
                                         "pod": 1,
                                         "service": 2,
                                     },
                                     "do_not_delete": 1,
                                 })
        time.sleep(10)
        with Then("Unaffected pod should not be restarted"):
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-{cluster}-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi(chi)
Example #26
def test_ch_001(self):
    util.require_zookeeper()
    chit_data = manifest.get_chit_data(
        util.get_full_path("templates/tpl-clickhouse-19.11.yaml"))
    kubectl.launch(f"delete chit {chit_data['metadata']['name']}",
                   ns=settings.test_namespace)
    kubectl.create_and_check(
        "configs/test-ch-001-insert-quorum.yaml", {
            "apply_templates": {"templates/tpl-clickhouse-20.8.yaml"},
            "pod_count": 2,
            "do_not_delete": 1,
        })
    chi = manifest.get_chi_name(
        util.get_full_path("configs/test-ch-001-insert-quorum.yaml"))
    chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
    util.wait_clickhouse_cluster_ready(chi_data)

    host0 = "chi-test-ch-001-insert-quorum-default-0-0"
    host1 = "chi-test-ch-001-insert-quorum-default-0-1"

    create_table = """
    create table t1 on cluster default (a Int8, d Date default today())
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by d order by a 
    TTL d + interval 5 second
    SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')

    create_mv_table2 = """
    create table t2 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv_table3 = """
    create table t3 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
    create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"

    with Given("Tables t1, t2, t3 and MVs t1->t2, t1-t3 are created"):
        clickhouse.query(chi, create_table)
        clickhouse.query(chi, create_mv_table2)
        clickhouse.query(chi, create_mv_table3)

        clickhouse.query(chi, create_mv2)
        clickhouse.query(chi, create_mv3)

        with When("Add a row to an old partition"):
            clickhouse.query(chi,
                             "insert into t1(a,d) values(6, today()-1)",
                             host=host0)

        with When("Stop fetches for t1 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t1", host=host1)

            with Then("Wait 10 seconds and the data should be dropped by TTL"):
                time.sleep(10)
                out = clickhouse.query(chi,
                                       "select count() from t1 where a=6",
                                       host=host0)
                assert out == "0"

        with When("Resume fetches for t1 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t1",
                             host=host1)
            time.sleep(5)

            with Then("Inserts should resume"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(7)",
                                 host=host0)

        clickhouse.query(chi, "insert into t1(a) values(1)")

        with When("Stop fetches for t2 at replica1"):
            clickhouse.query(chi, "system stop fetches default.t2", host=host1)

            with Then("Insert should fail since it can not reach the quorum"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(2)", host=host0)
                assert "Timeout while waiting for quorum" in out

        # kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
        # with Then("Corrupt data part in t2"):
        #    kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")

        with When("Resume fetches for t2 at replica1"):
            clickhouse.query(chi,
                             "system start fetches default.t2",
                             host=host1)
            i = 0
            while "2" != clickhouse.query(
                    chi,
                    "select active_replicas from system.replicas where database='default' and table='t1'",
                    pod=host0) and i < 10:
                with Then("Not ready, wait 5 seconds"):
                    time.sleep(5)
                    i += 1

            with Then(
                    "Inserts should fail with an error regarding not satisfied quorum"
            ):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(3)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

            with And("Second insert of the same block should pass"):
                clickhouse.query(chi,
                                 "insert into t1(a) values(3)",
                                 host=host0)

            with And("Insert of the new block should fail"):
                out = clickhouse.query_with_error(
                    chi, "insert into t1(a) values(4)", host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

            with And(
                    "Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"
            ):
                out = clickhouse.query_with_error(
                    chi,
                    "set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
                    host=host0)
                assert "Quorum for previous write has not been satisfied yet" in out

        out = clickhouse.query_with_error(
            chi,
            "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
        )
        print(out)
def drop_distributed_table_on_cluster(cluster_name='all-sharded', distr_table='default.test_distr', local_table='default.test'):
    drop_distr_sql = f'DROP TABLE {distr_table} ON CLUSTER \\\"{cluster_name}\\\"'
    clickhouse.query(chi["metadata"]["name"], drop_distr_sql, timeout=120)
    drop_table_on_cluster(cluster_name, local_table)
def create_table_on_cluster(cluster_name='all-sharded', table='default.test',
                            create_definition='(event_time DateTime, test UInt64) ENGINE MergeTree() ORDER BY tuple()'):
    create_local_sql = f'CREATE TABLE {table} ON CLUSTER \\\"{cluster_name}\\\" {create_definition}'
    clickhouse.query(chi["metadata"]["name"], create_local_sql, timeout=120)
def drop_table_on_cluster(cluster_name='all-sharded', table='default.test'):
    drop_local_sql = f'DROP TABLE {table} ON CLUSTER \\\"{cluster_name}\\\" SYNC'
    clickhouse.query(chi["metadata"]["name"], drop_local_sql, timeout=120)
Example #30
def test_014():
    require_zookeeper()

    create_table = """
    CREATE TABLE test_local(a Int8) 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    PARTITION BY tuple() 
    ORDER BY a
    """.replace('\r', '').replace('\n', '')

    config = "configs/test-014-replication-1.yaml"
    chi = manifest.get_chi_name(util.get_full_path(config))
    cluster = "default"

    kubectl.create_and_check(
        config=config,
        check={
            "apply_templates": {
                settings.clickhouse_template,
                "templates/tpl-persistent-volume-100Mi.yaml",
            },
            "object_counts": {
                "statefulset": 2,
                "pod": 2,
                "service": 3,
            },
            "do_not_delete": 1,
        })

    start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                   ".status.startTime")

    schema_objects = ['test_local', 'test_view', 'test_mv', 'a_view']
    with Given("Create schema objects"):
        clickhouse.query(chi, create_table, host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW test_view as SELECT * from test_local",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(chi,
                         "CREATE VIEW a_view as SELECT * from test_view",
                         host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE MATERIALIZED VIEW test_mv Engine = Log as SELECT * from test_local",
            host=f"chi-{chi}-{cluster}-0-0")
        clickhouse.query(
            chi,
            "CREATE DICTIONARY test_dict (a Int8, b Int8) PRIMARY KEY a SOURCE(CLICKHOUSE(host 'localhost' port 9000 table 'test_local' user 'default')) LAYOUT(FLAT()) LIFETIME(0)",
            host=f"chi-{chi}-{cluster}-0-0")

    with Given(
            "Replicated table is created on a first replica and data is inserted"
    ):
        clickhouse.query(chi,
                         "INSERT INTO test_local values(1)",
                         host=f"chi-{chi}-{cluster}-0-0")
        with When("Table is created on the second replica"):
            clickhouse.query(chi,
                             create_table,
                             host=f"chi-{chi}-{cluster}-0-1")
            # Give some time for replication to catch up
            time.sleep(10)
            with Then("Data should be replicated"):
                out = clickhouse.query(chi,
                                       "SELECT a FROM test_local",
                                       host=f"chi-{chi}-{cluster}-0-1")
                assert out == "1"

    with When("Add one more replica"):
        kubectl.create_and_check(config="configs/test-014-replication-2.yaml",
                                 check={
                                     "pod_count": 3,
                                     "do_not_delete": 1,
                                 })
        # Give some time for replication to catch up
        time.sleep(10)

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Schema objects should be migrated to the new replica"):
            for obj in schema_objects:
                out = clickhouse.query(
                    chi,
                    f"SELECT count() FROM system.tables WHERE name = '{obj}'",
                    host=f"chi-{chi}-{cluster}-0-2")
                assert out == "1"
            # Check dictionary
            out = clickhouse.query(
                chi,
                f"SELECT count() FROM system.dictionaries WHERE name = 'test_dict'",
                host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

        with And("Replicated table should have the data"):
            out = clickhouse.query(chi,
                                   "SELECT a FROM test_local",
                                   host=f"chi-{chi}-{cluster}-0-2")
            assert out == "1"

    with When("Remove replica"):
        kubectl.create_and_check(config=config,
                                 check={
                                     "pod_count": 1,
                                     "do_not_delete": 1,
                                 })

        new_start_time = kubectl.get_field("pod", f"chi-{chi}-{cluster}-0-0-0",
                                           ".status.startTime")
        assert start_time == new_start_time

        with Then("Replica needs to be removed from the Zookeeper as well"):
            out = clickhouse.query(
                chi,
                "SELECT count() FROM system.replicas WHERE table='test_local'")
            assert out == "1"

    with When("Restart Zookeeper pod"):
        with Then("Delete Zookeeper pod"):
            kubectl.launch("delete pod zookeeper-0")
            time.sleep(1)

        with Then(
                "Insert into the table while there is no Zookeeper -- table should be in readonly mode"
        ):
            out = clickhouse.query_with_error(
                chi, "INSERT INTO test_local values(2)")
            assert "Table is in readonly mode" in out

        with Then("Wait for Zookeeper pod to come back"):
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")

        with Then(
                "Wait for ClickHouse to reconnect to Zookeeper and switch to read-write mode"
        ):
            time.sleep(30)
        # with Then("Restart clickhouse pods"):
        #    kubectl("delete pod chi-test-014-replication-default-0-0-0")
        #    kubectl("delete pod chi-test-014-replication-default-0-1-0")

        with Then("Table should be back to normal"):
            clickhouse.query(chi, "INSERT INTO test_local values(3)")

    kubectl.delete_chi("test-014-replication")
Example #31
def test_016():
    chi = "test-016-settings"
    kubectl.create_and_check(config="configs/test-016-settings-01.yaml",
                             check={
                                 "apply_templates": {
                                     settings.clickhouse_template,
                                 },
                                 "pod_count": 1,
                                 "do_not_delete": 1,
                             })

    with Then("Custom macro 'layer' should be available"):
        out = clickhouse.query(
            chi,
            sql="select substitution from system.macros where macro='layer'")
        assert out == "01"

    with And("Custom macro 'test' should be available"):
        out = clickhouse.query(
            chi,
            sql="select substitution from system.macros where macro='test'")
        assert out == "test"

    with And("dictGet() should work"):
        out = clickhouse.query(chi,
                               sql="select dictGet('one', 'one', toUInt64(0))")
        assert out == "0"

    with And("query_log should be disabled"):
        clickhouse.query(chi, sql="system flush logs")
        out = clickhouse.query_with_error(
            chi, sql="select count() from system.query_log")
        assert "doesn't exist" in out

    with And("max_memory_usage should be 7000000000"):
        out = clickhouse.query(
            chi,
            sql=
            "select value from system.settings where name='max_memory_usage'")
        assert out == "7000000000"

    with And("test_usersd user should be available"):
        clickhouse.query(chi, sql="select version()", user="******")

    with And("user1 user should be available"):
        clickhouse.query(chi,
                         sql="select version()",
                         user="user1",
                         pwd="qwerty")

    with And("system.clusters should be empty due to remote_servers override"):
        out = clickhouse.query(chi, sql="select count() from system.clusters")
        assert out == "0"

    with When("Update usersd settings"):
        start_time = kubectl.get_field("pod", f"chi-{chi}-default-0-0-0",
                                       ".status.startTime")
        kubectl.create_and_check(config="configs/test-016-settings-02.yaml",
                                 check={
                                     "do_not_delete": 1,
                                 })
        with Then("Wait for configmap changes to apply"):
            config_map_applied_num = "0"
            i = 1
            while config_map_applied_num == "0" and i < 10:
                config_map_applied_num = kubectl.launch(
                    f"exec chi-{chi}-default-0-0-0 -- bash -c \"grep test_norestart /etc/clickhouse-server/users.d/my_users.xml | wc -l\""
                )
                if config_map_applied_num != "0":
                    break
                with And(f"not applied, wait {15 * i}s"):
                    time.sleep(15 * i)
                    i += 1

            assert config_map_applied_num != "0", "ConfigMap should be applied"

        version = ""
        with Then("test_norestart user should be available"):
            version = clickhouse.query(chi,
                                       sql="select version()",
                                       user="test_norestart")
        with And("user1 user should not be available"):
            version_user1 = clickhouse.query_with_error(chi,
                                                        sql="select version()",
                                                        user="user1",
                                                        pwd="qwerty")
            assert version != version_user1
        with And("user2 user should be available"):
            version_user2 = clickhouse.query(chi,
                                             sql="select version()",
                                             user="user2",
                                             pwd="qwerty")
            assert version == version_user2
        with And("ClickHouse should not be restarted"):
            new_start_time = kubectl.get_field("pod",
                                               f"chi-{chi}-default-0-0-0",
                                               ".status.startTime")
            assert start_time == new_start_time

    kubectl.delete_chi("test-016-settings")
Example #32
File: mini_bi.py  Project: jkee/mini-bi
def query():
    query = request.args['query']
    res = clickhouse.query(query)
    return render_template('bi.html', query=query, result=res[0][0])