Code example #1
File: test.py Project: zvrr/ClickHouse
def test_login_as_dropped_user_xml():
    for _ in range(0, 2):
        instance.exec_in_container(["bash", "-c" , """
            cat > /etc/clickhouse-server/users.d/user_c.xml << EOF
<?xml version="1.0"?>
<yandex>
    <users>
        <C>
            <no_password/>
        </C>
    </users>
</yandex>
EOF"""])

        assert_eq_with_retry(instance, "SELECT name FROM system.users WHERE name='C'", "C")

        instance.exec_in_container(["bash", "-c" , "rm /etc/clickhouse-server/users.d/user_c.xml"])

        expected_error = "no user with such name"
        while True:
            out, err = instance.query_and_get_answer_with_error("SELECT 1", user='******')
            if expected_error in err:
                logging.debug(f"Got error '{expected_error}' just as expected")
                break
            if out == "1\n":
                logging.debug(f"Got output '1', retrying...")
                time.sleep(0.5)
                continue
            raise Exception(f"Expected either output '1' or error '{expected_error}', got output={out} and error={err}")
            
        assert instance.query("SELECT name FROM system.users WHERE name='C'") == ""
Code example #2
def test_concurrent_backups_on_different_nodes():
    create_and_fill_table()

    assert num_concurrent_backups <= num_nodes
    backup_names = [new_backup_name() for _ in range(num_concurrent_backups)]

    ids = []
    for i in range(num_concurrent_backups):
        id = (nodes[i].query(
            f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_names[i]} ASYNC"
        ).split("\t")[0])
        ids.append(id)

    for i in range(num_concurrent_backups):
        assert_eq_with_retry(
            nodes[i],
            f"SELECT status FROM system.backups WHERE status == 'CREATING_BACKUP' AND id = '{ids[i]}'",
            "",
        )

    for i in range(num_concurrent_backups):
        assert nodes[i].query(
            f"SELECT status, error FROM system.backups WHERE id = '{ids[i]}'"
        ) == TSV([["BACKUP_CREATED", ""]])

    for i in range(num_concurrent_backups):
        nodes[i].query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
        nodes[i].query(
            f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_names[i]}")
        nodes[i].query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
        for j in range(num_nodes):
            assert nodes[j].query("SELECT sum(x) FROM tbl") == TSV(
                [expected_sum])
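
Note: the concurrent-backup examples (#2, #15 and the async ones further down) rely on module-level fixtures that are not shown on this page. The sketch below is a hypothetical reconstruction based only on how they are called; the real test module may use a different schema, row count, cluster layout or backup destination, and nodes, num_nodes, num_concurrent_backups and num_rows are all assumptions here.

import itertools

num_nodes = 2                 # assumption: replicas in the 'cluster' used above
num_concurrent_backups = 4    # assumption
num_rows = 1000               # assumption
expected_sum = num_rows * (num_rows - 1) // 2  # sum of 0..num_rows-1

_backup_counter = itertools.count()


def new_backup_name():
    # Return a fresh destination for each call so that concurrent backups
    # never collide on the same path (Disk(...) destination is an assumption).
    return f"Disk('backups', '{next(_backup_counter)}')"


def create_and_fill_table():
    # Replicated table on every node of the (assumed single-shard) cluster,
    # filled with num_rows rows so that sum(x) equals expected_sum.
    nodes[0].query(
        "CREATE TABLE tbl ON CLUSTER 'cluster' (x UInt64) "
        "ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl', '{replica}') "
        "ORDER BY x"
    )
    nodes[0].query(f"INSERT INTO tbl SELECT number FROM numbers({num_rows})")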
Code example #3
def test_mixed_granularity_single_node(start_dynamic_cluster, node):
    node.query("INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 1, 333), (toDate('2018-10-02'), 2, 444)")
    node.query("INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)")

    def callback(n):
        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", "<yandex><merge_tree><enable_mixed_granularity_parts>1</enable_mixed_granularity_parts></merge_tree></yandex>")
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", "<yandex><merge_tree><enable_mixed_granularity_parts>1</enable_mixed_granularity_parts></merge_tree></yandex>")

    node.restart_with_latest_version(callback_onstop=callback)
    node.query("SYSTEM RELOAD CONFIG")
    assert_eq_with_retry(node, "SELECT value FROM system.merge_tree_settings WHERE name='enable_mixed_granularity_parts'", '1')
    assert node.query("SELECT count() from table_with_default_granularity") == '4\n'
    node.query("INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)")
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'
    node.query("OPTIMIZE TABLE table_with_default_granularity PARTITION 201810 FINAL")
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'
    path_to_merged_part = node.query("SELECT path FROM system.parts WHERE table = 'table_with_default_granularity' AND active=1 ORDER BY partition DESC LIMIT 1").strip()
    node.exec_in_container(["bash", "-c", "find {p} -name '*.mrk2' | grep '.*'".format(p=path_to_merged_part)]) # check that we have adaptive files

    path_to_old_part = node.query("SELECT path FROM system.parts WHERE table = 'table_with_default_granularity' AND active=1 ORDER BY partition ASC LIMIT 1").strip()

    node.exec_in_container(["bash", "-c", "find {p} -name '*.mrk' | grep '.*'".format(p=path_to_old_part)]) # check that we have non adaptive files

    node.query("ALTER TABLE table_with_default_granularity UPDATE dummy = dummy + 1 WHERE 1")
    # still works
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'

    node.query("ALTER TABLE table_with_default_granularity MODIFY COLUMN dummy String")
    node.query("ALTER TABLE table_with_default_granularity ADD COLUMN dummy2 Float64")

    # still works
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'
Code example #4
def test_atomic_database(started_cluster):
    node1.query('''DROP DATABASE IF EXISTS replica_1 ON CLUSTER cross_3shards_2replicas;
                   DROP DATABASE IF EXISTS replica_2 ON CLUSTER cross_3shards_2replicas;
                   CREATE DATABASE replica_1 ON CLUSTER cross_3shards_2replicas ENGINE=Atomic;
                   CREATE DATABASE replica_2 ON CLUSTER cross_3shards_2replicas ENGINE=Atomic;''')

    assert "It's not supported for cross replication" in \
           node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
    assert "It's not supported for cross replication" in \
           node1.query_and_get_error("CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
    assert "It's not supported for cross replication" in \
           node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n")
    assert "It's not supported for cross replication" in \
           node1.query_and_get_error("CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n")
    assert "For a distributed DDL on circular replicated cluster its table name must be qualified by database name" in \
           node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n")

    node1.query("CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n")
    node1.query("CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard_bk}/rmt/', '{replica_bk}') ORDER BY n")

    assert node1.query("SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 'databases') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='replica_1'") == "1\n"
    assert node1.query("SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 'tables') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='rmt'") == "2\n"

    node1.query("INSERT INTO replica_1.rmt VALUES (1, 'test')")
    node2.query("SYSTEM SYNC REPLICA replica_2.rmt", timeout=5)
    assert_eq_with_retry(node2, "SELECT * FROM replica_2.rmt", '1\ttest')
Code example #5
def test_no_ttl_merges_in_busy_pool(started_cluster):
    node1.query(
        "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0"
    )

    node1.query("SYSTEM STOP TTL MERGES")

    for i in range(1, 7):
        node1.query(
            "INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {}, number FROM numbers(5)"
            .format(i))

    node1.query(
        "ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0"
    )

    while count_running_mutations(node1, "test_ttl") < 6:
        print "Mutations count", count_running_mutations(node1, "test_ttl")
        assert count_ttl_merges_in_background_pool(node1, "test_ttl") == 0
        time.sleep(0.5)

    node1.query("SYSTEM START TTL MERGES")

    while count_running_mutations(node1, "test_ttl") == 6:
        print "Mutations count after start TTL", count_running_mutations(
            node1, "test_ttl")
        assert node1.query("SELECT count() FROM test_ttl") == "30\n"
        time.sleep(0.5)

    assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0")
Code example #6
File: test.py Project: zhengguilin/ClickHouse
def test_in_memory(start_cluster):
    node9.query("SYSTEM STOP MERGES")
    node10.query("SYSTEM STOP MERGES")

    for size in [200, 200, 300, 600]:
        insert_random_data('in_memory_table', node9, size)
    node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20)

    assert node9.query("SELECT count() FROM in_memory_table") == "1300\n"
    assert node10.query("SELECT count() FROM in_memory_table") == "1300\n"

    expected = "Compact\t1\nInMemory\t2\nWide\t1\n"

    assert TSV(node9.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)
    assert TSV(node10.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV(expected)

    node9.query("SYSTEM START MERGES")
    node10.query("SYSTEM START MERGES")

    assert_eq_with_retry(
        node9,
        "OPTIMIZE TABLE in_memory_table FINAL SETTINGS optimize_throw_if_noop = 1",
        "")
    node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20)

    assert node9.query("SELECT count() FROM in_memory_table") == "1300\n"
    assert node10.query("SELECT count() FROM in_memory_table") == "1300\n"

    assert TSV(node9.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV("Wide\t1\n")
    assert TSV(node10.query("SELECT part_type, count() FROM system.parts " \
        "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV("Wide\t1\n")
Code example #7
def test_different_versions_cluster(start_static_cluster, first_node,
                                    second_node, table):
    counter = 1
    for n1, n2 in ((first_node, second_node), (second_node, first_node)):
        n1.query(
            "INSERT INTO {tbl} VALUES (toDate('2018-10-01'), {c1}, 333), (toDate('2018-10-02'), {c2}, 444)"
            .format(tbl=table, c1=counter * 2, c2=counter * 2 + 1))
        n2.query("SYSTEM SYNC REPLICA {tbl}".format(tbl=table))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table),
                             str(counter * 2))
        n1.query("DETACH TABLE {tbl}".format(tbl=table))
        n2.query("DETACH TABLE {tbl}".format(tbl=table))
        n1.query("ATTACH TABLE {tbl}".format(tbl=table))
        n2.query("ATTACH TABLE {tbl}".format(tbl=table))
        assert_eq_with_retry(n1, "SELECT count() from {tbl}".format(tbl=table),
                             str(counter * 2))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table),
                             str(counter * 2))
        n1.query("OPTIMIZE TABLE {tbl} FINAL".format(tbl=table))
        n2.query("SYSTEM SYNC REPLICA {tbl}".format(tbl=table))
        assert_eq_with_retry(n1, "SELECT count() from {tbl}".format(tbl=table),
                             str(counter * 2))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table),
                             str(counter * 2))
        counter += 1
Code example #8
def test_system_users_async():
    instance.query(
        "CREATE USER u1 IDENTIFIED BY 'qwe123' SETTINGS custom_c = 3")

    backup_name = new_backup_name()
    [id, _, status] = instance.query(
        f"BACKUP DATABASE default, TABLE system.users, TABLE system.roles, TABLE system.settings_profiles, TABLE system.row_policies, TABLE system.quotas TO {backup_name} ASYNC"
    ).split("\t")
    assert_eq_with_retry(
        instance,
        f"SELECT status FROM system.backups WHERE uuid='{id}'",
        "BACKUP_COMPLETE\n",
    )

    instance.query("DROP USER u1")

    [id, _, status] = instance.query(
        f"RESTORE DATABASE default, TABLE system.users, TABLE system.roles, TABLE system.settings_profiles, TABLE system.row_policies, TABLE system.quotas FROM {backup_name} ASYNC"
    ).split("\t")
    assert_eq_with_retry(
        instance,
        f"SELECT status FROM system.backups WHERE uuid='{id}'",
        "RESTORED\n",
    )

    assert (
        instance.query("SHOW CREATE USER u1") ==
        "CREATE USER u1 IDENTIFIED WITH sha256_password SETTINGS custom_c = 3\n"
    )
Code example #9
def test_async():
    create_and_fill_table()
    assert instance.query(
        "SELECT count(), sum(x) FROM test.table") == "100\t4950\n"

    backup_name = new_backup_name()
    [id, _, status] = instance.query(
        f"BACKUP TABLE test.table TO {backup_name} ASYNC").split("\t")
    assert status == "MAKING_BACKUP\n" or status == "BACKUP_COMPLETE\n"
    assert_eq_with_retry(
        instance,
        f"SELECT status FROM system.backups WHERE uuid='{id}'",
        "BACKUP_COMPLETE\n",
    )

    instance.query("DROP TABLE test.table")

    [id, _, status] = instance.query(
        f"RESTORE TABLE test.table FROM {backup_name} ASYNC").split("\t")
    assert status == "RESTORING\n" or status == "RESTORED\n"
    assert_eq_with_retry(
        instance, f"SELECT status FROM system.backups WHERE uuid='{id}'",
        "RESTORED\n")

    assert instance.query(
        "SELECT count(), sum(x) FROM test.table") == "100\t4950\n"
Code example #10
def test_no_ttl_merges_in_busy_pool(started_cluster):
    node1.query("DROP TABLE IF EXISTS test_ttl")
    node1.query(
        "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0")

    node1.query("SYSTEM STOP TTL MERGES")

    for i in range(1, 7):
        node1.query(
            "INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {}, number FROM numbers(5)".format(i))

    node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0")

    while count_running_mutations(node1, "test_ttl") < 6:
        print("Mutations count", count_running_mutations(node1, "test_ttl"))
        assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0
        time.sleep(0.5)

    node1.query("SYSTEM START TTL MERGES")

    rows_count = []
    while count_running_mutations(node1, "test_ttl") == 6:
        print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl"))
        rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip()))
        time.sleep(0.5)

    # For at least several seconds no TTL merges ran and the row count stayed
    # equal to the original value
    assert sum([1 for count in rows_count if count == 30]) > 4

    assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0")
Code example #11
File: test.py Project: zeromem/ClickHouse
def test_replication_after_partition(both_https_cluster):
    node1.query("truncate table test_table")
    node2.query("truncate table test_table")

    manager = PartitionManager()

    def close(num):
        manager.partition_instances(node1, node2, port=9010)
        time.sleep(1)
        manager.heal_all()

    def insert_data_and_check(num):
        node1.query("insert into test_table values('2019-10-15', {}, 888)".format(num))
        time.sleep(0.5)

    closing_pool = Pool(1)
    inserting_pool = Pool(5)
    cres = closing_pool.map_async(close, [random.randint(1, 3) for _ in range(10)])
    ires = inserting_pool.map_async(insert_data_and_check, range(100))

    cres.wait()
    ires.wait()

    assert_eq_with_retry(node1, "SELECT count() FROM test_table", '100')
    assert_eq_with_retry(node2, "SELECT count() FROM test_table", '100')
Code example #12
def test_concurrent_backups(start_cluster):
    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
    columns = [f"column_{i} UInt64" for i in range(1000)]
    columns_str = ", ".join(columns)
    node.query(
        f"CREATE TABLE s3_test ({columns_str}) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='s3';"
    )
    node.query(
        f"INSERT INTO s3_test SELECT * FROM generateRandom('{columns_str}') LIMIT 10000"
    )

    def create_backup(i):
        backup_name = f"Disk('hdd', '/backups/{i}')"
        node.query(f"BACKUP TABLE s3_test TO {backup_name} ASYNC")

    p = Pool(40)

    p.map(create_backup, range(40))

    assert_eq_with_retry(
        node,
        "SELECT count() FROM system.backups WHERE status != 'BACKUP_CREATED' and status != 'BACKUP_FAILED'",
        "0",
        retry_count=100,
    )
    assert node.query(
        "SELECT count() FROM s3_test where not ignore(*)") == "10000\n"
Code example #13
def test_upgrade_while_mutation(start_cluster):
    node3.query("DROP TABLE IF EXISTS mt1")

    node3.query(
        "CREATE TABLE mt1 (EventDate Date, id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', 'node3') ORDER BY tuple()"
    )

    node3.query("INSERT INTO mt1 select '2020-02-13', number from numbers(100000)")

    node3.query("SYSTEM STOP MERGES mt1")
    node3.query("ALTER TABLE mt1 DELETE WHERE id % 2 == 0")

    node3.query("DETACH TABLE mt1")  # stop being leader
    node3.restart_with_latest_version(signal=9)

    # checks for readonly
    exec_query_with_retry(node3, "OPTIMIZE TABLE mt1", sleep_time=5, retry_count=60)

    node3.query(
        "ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}
    )
    # will delete nothing, but previous async mutation will finish with this query

    assert_eq_with_retry(node3, "SELECT COUNT() from mt1", "50000\n")

    node3.query("DROP TABLE mt1")
Code example #14
File: test.py Project: wwjiang007/ClickHouse
def test_insert_clamps_settings():
    for node in [node1, node2]:
        node.query(
            "CREATE TABLE sometable_insert (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;"
        )
        node.query(
            "INSERT INTO sometable_insert VALUES (toDate('2010-01-10'), 1, 1)")

    distributed.query(
        "CREATE TABLE proxy_insert (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_insert, toUInt64(date));"
    )

    node1.query(
        "ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999"
    )
    node2.query(
        "ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999"
    )

    distributed.query(
        "INSERT INTO proxy_insert VALUES (toDate('2020-02-20'), 2, 2)")
    distributed.query(
        "INSERT INTO proxy_insert VALUES (toDate('2020-02-21'), 2, 2)",
        settings={"max_memory_usage": 5000000},
    )
    distributed.query("SYSTEM FLUSH DISTRIBUTED proxy_insert")
    assert_eq_with_retry(distributed, "SELECT COUNT() FROM proxy_insert", "4")
Code example #15
def test_concurrent_backups_on_same_node():
    create_and_fill_table()

    backup_names = [new_backup_name() for _ in range(num_concurrent_backups)]

    ids = []
    for backup_name in backup_names:
        id = node0.query(
            f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name} ASYNC"
        ).split("\t")[0]
        ids.append(id)

    ids_list = "[" + ", ".join([f"'{id}'" for id in ids]) + "]"

    assert_eq_with_retry(
        node0,
        f"SELECT status FROM system.backups WHERE status == 'CREATING_BACKUP' AND id IN {ids_list}",
        "",
    )

    assert node0.query(
        f"SELECT status, error FROM system.backups WHERE id IN {ids_list}"
    ) == TSV([["BACKUP_CREATED", ""]] * num_concurrent_backups)

    for backup_name in backup_names:
        node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
        node0.query(
            f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
        node0.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
        for i in range(num_nodes):
            assert nodes[i].query("SELECT sum(x) FROM tbl") == TSV(
                [expected_sum])
Code example #16
File: test.py Project: zp672087110/ClickHouse
def test_merge_doesnt_work_without_zookeeper(start_cluster):
    node1.query("INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)")
    node1.query("INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)")
    assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "2\n"

    node1.query("OPTIMIZE TABLE test_table FINAL")
    assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "3\n"

    assert_eq_with_retry(node1, "SELECT count(*) from system.parts where table = 'test_table' and active = 1", "1")

    node1.query("TRUNCATE TABLE test_table")

    assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "0\n"

    node1.query("INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)")
    node1.query("INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)")
    assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "2\n"

    with PartitionManager() as pm:
        node1.query("OPTIMIZE TABLE test_table FINAL")
        pm.drop_instance_zk_connections(node1)
        time.sleep(10) # > old_parts_lifetime
        assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "3\n"

    assert_eq_with_retry(node1, "SELECT count(*) from system.parts where table = 'test_table' and active = 1", "1")
Code example #17
File: test.py Project: wwjiang007/ClickHouse
def test_hdfs_zero_copy_with_ttl_delete(cluster):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]
    try:
        node1.query("""
            CREATE TABLE ttl_delete_test ON CLUSTER test_cluster (dt DateTime, id Int64)
            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/ttl_delete_test', '{replica}')
            ORDER BY (dt, id)
            TTL dt + INTERVAL 2 DAY
            SETTINGS storage_policy='tiered'
            """)

        node1.query(
            "INSERT INTO ttl_delete_test VALUES (now() - INTERVAL 3 DAY, 10)")
        node1.query(
            "INSERT INTO ttl_delete_test VALUES (now() - INTERVAL 1 DAY, 11)")

        node1.query("OPTIMIZE TABLE ttl_delete_test FINAL")
        node2.query("SYSTEM SYNC REPLICA ttl_delete_test", timeout=30)

        assert_eq_with_retry(node1, "SELECT count() FROM ttl_delete_test", "1")
        assert_eq_with_retry(node2, "SELECT count() FROM ttl_delete_test", "1")

        assert (node1.query(
            "SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") ==
                "(11)")
        assert (node2.query(
            "SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") ==
                "(11)")
    finally:
        node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
        node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
Code example #18
def test_table_override(started_cluster):
    if instance.is_built_with_sanitizer() or instance.is_debug_build():
        pytest.skip("Temporary disabled (FIXME)")
    cursor = pg_manager.get_db_cursor()
    table_name = "table_override"
    materialized_database = "test_database"
    create_postgres_table(cursor,
                          table_name,
                          template=postgres_table_template_5)
    instance.query(
        f"create table {table_name}(key Int32, value UUID) engine = PostgreSQL (postgres1, table={table_name})"
    )
    instance.query(
        f"insert into {table_name} select number, generateUUIDv4() from numbers(10)"
    )
    table_overrides = f" TABLE OVERRIDE {table_name} (COLUMNS (key Int32, value UUID) PARTITION BY key)"
    pg_manager.create_materialized_db(
        ip=started_cluster.postgres_ip,
        port=started_cluster.postgres_port,
        settings=[f"materialized_postgresql_tables_list = '{table_name}'"],
        table_overrides=table_overrides,
    )
    assert_nested_table_is_created(instance, table_name, materialized_database)
    result = instance.query(
        f"show create table {materialized_database}.{table_name}")
    print(result)
    expected = "CREATE TABLE test_database.table_override\\n(\\n    `key` Int32,\\n    `value` UUID,\\n    `_sign` Int8() MATERIALIZED 1,\\n    `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)"
    assert result.strip() == expected
    time.sleep(5)
    query = f"select * from {materialized_database}.{table_name} order by key"
    expected = instance.query(f"select * from {table_name} order by key")
    instance.query(f"drop table {table_name} no delay")
    assert_eq_with_retry(instance, query, expected)
Code example #19
    def restart_with_latest_version(self,
                                    stop_start_wait_sec=10,
                                    callback_onstop=None,
                                    signal=15):
        if not self.stay_alive:
            raise Exception("Cannot restart a container that was not started with stay_alive=True")
        self.exec_in_container(
            ["bash", "-c", "pkill -{} clickhouse".format(signal)], user='******')
        retries = int(stop_start_wait_sec / 0.5)
        local_counter = 0
        # wait stop
        while local_counter < retries:
            if not self.get_process_pid("clickhouse server"):
                break
            time.sleep(0.5)
            local_counter += 1

        if callback_onstop:
            callback_onstop(self)
        self.exec_in_container([
            "bash", "-c",
            "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"
        ],
                               user='******')
        self.exec_in_container([
            "bash", "-c",
            "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"
        ],
                               user='******')
        self.exec_in_container(
            ["bash", "-c", "{} --daemon".format(CLICKHOUSE_START_COMMAND)],
            user=str(os.getuid()))
        from helpers.test_tools import assert_eq_with_retry
        # wait start
        assert_eq_with_retry(self, "select 1", "1", retry_count=retries)
Code example #20
File: test.py Project: zeromem/ClickHouse
def test_in_memory_wal_rotate(start_cluster):
    # Write every part to a single WAL file
    node11.query("ALTER TABLE restore_table MODIFY SETTING write_ahead_log_max_bytes = 10")
    for i in range(5):
        insert_random_data('restore_table', node11, 50)

    for i in range(5):
        wal_file = os.path.join(node11.path, "database/data/default/restore_table/wal_{0}_{0}.bin".format(i))
        assert os.path.exists(wal_file)

    for node in [node11, node12]:
        node.query("ALTER TABLE restore_table MODIFY SETTING number_of_free_entries_in_pool_to_lower_max_size_of_merge = 0")
        node.query("ALTER TABLE restore_table MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 10000000")

    assert_eq_with_retry(node11, "OPTIMIZE TABLE restore_table FINAL SETTINGS optimize_throw_if_noop = 1", "")
    # Restart to be sure that the task clearing stale logs has run
    node11.restart_clickhouse(kill=True)

    for i in range(5):
        wal_file = os.path.join(node11.path, "database/data/default/restore_table/wal_{0}_{0}.bin".format(i))
        assert not os.path.exists(wal_file)

    # A new WAL file was created and is ready for new parts
    wal_file = os.path.join(node11.path, "database/data/default/restore_table/wal.bin")
    assert os.path.exists(wal_file)
    assert os.path.getsize(wal_file) == 0
Code example #21
File: test.py Project: yixiu2rose/ClickHouse
def test_user_access_ip_change(cluster_with_dns_cache_update, node):
    node_name = node.name
    node_num = node.name[-1]
    # getaddrinfo(...) may hang for a long time without these options
    node.exec_in_container(['bash', '-c', 'echo -e "options timeout:1\noptions attempts:2" >> /etc/resolv.conf'], privileged=True, user='******')

    assert node3.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) == "0\n"
    assert node4.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) == "0\n"

    set_hosts(node, ['127.255.255.255 node3', '2001:3984:3989::1:88{}4 unknown_host'.format(node_num)])

    cluster.restart_instance_with_ip_change(node3, "2001:3984:3989::1:88{}3".format(node_num))
    cluster.restart_instance_with_ip_change(node4, "2001:3984:3989::1:88{}4".format(node_num))

    with pytest.raises(QueryRuntimeException):
        node3.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name))
    with pytest.raises(QueryRuntimeException):
        node4.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name))
    # now wrong addresses are cached

    set_hosts(node, [])
    retry_count = 60
    if node_name == 'node5':
        # client is not allowed to connect, so execute it directly in container to send query from localhost
        node.exec_in_container(['bash', '-c', 'clickhouse client -q "SYSTEM DROP DNS CACHE"'], privileged=True, user='******')
        retry_count = 1

    assert_eq_with_retry(node3, "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), "0", retry_count=retry_count, sleep_time=1)
    assert_eq_with_retry(node4, "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), "0", retry_count=retry_count, sleep_time=1)
Code example #22
File: test.py Project: zp672087110/ClickHouse
def test_reload_after_fail_in_cache_dictionary(started_cluster):
    query = instance.query
    query_and_get_error = instance.query_and_get_error

    # Can't get a value from the cache dictionary because the source (table `test.xypairs`) doesn't respond.
    expected_error = "Table test.xypairs doesn't exist"
    assert expected_error in query_and_get_error(
        "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(1))")
    assert get_status("cache_xypairs") == "LOADED"
    assert expected_error in get_last_exception("cache_xypairs")

    # Create table `test.xypairs`.
    query('''
        DROP TABLE IF EXISTS test.xypairs;
        CREATE TABLE test.xypairs (x UInt64, y UInt64) ENGINE=Log;
        INSERT INTO test.xypairs VALUES (1, 56), (3, 78);
        ''')

    # Cache dictionary now works.
    assert_eq_with_retry(instance,
                         "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))",
                         "56",
                         ignore_error=True)
    query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0"
    assert get_last_exception("cache_xypairs") == ""

    # Drop table `test.xypairs`.
    query('DROP TABLE test.xypairs')

    # Values are cached so we can get them.
    query("SELECT dictGet('cache_xypairs', 'y', toUInt64(1))") == "56"
    query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0"
    assert get_last_exception("cache_xypairs") == ""

    # But we can't get a value from the source table which isn't cached.
    assert expected_error in query_and_get_error(
        "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))")
    assert expected_error in get_last_exception("cache_xypairs")

    # The passage of time should not invalidate the cache.
    time.sleep(5)
    assert query("SELECT dictGet('cache_xypairs', 'y', toUInt64(1))").strip() == "56"
    assert query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))").strip() == "0"
    assert expected_error in query_and_get_error(
        "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))")
    assert expected_error in get_last_exception("cache_xypairs")

    # Create table `test.xypairs` again with changed values.
    query('''
        CREATE TABLE test.xypairs (x UInt64, y UInt64) ENGINE=Log;
        INSERT INTO test.xypairs VALUES (1, 57), (3, 79);
        ''')

    # The cache dictionary returns new values now.
    assert_eq_with_retry(instance,
                         "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))",
                         "57")
    query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0"
    query("SELECT dictGet('cache_xypairs', 'y', toUInt64(3))") == "79"
    assert get_last_exception("cache_xypairs") == ""
Code example #23
def test_dns_cache_update(cluster_with_dns_cache_update):
    set_hosts(node4, ['127.255.255.255 lost_host'])

    with pytest.raises(QueryRuntimeException):
        node4.query("SELECT * FROM remote('lost_host', 'system', 'one')")

    node4.query(
        "CREATE TABLE distributed_lost_host (dummy UInt8) ENGINE = Distributed(lost_host_cluster, 'system', 'one')"
    )
    with pytest.raises(QueryRuntimeException):
        node4.query("SELECT * FROM distributed_lost_host")

    set_hosts(node4, ['127.0.0.1 lost_host'])

    # Wait a bit until the DNS cache is updated
    assert_eq_with_retry(node4,
                         "SELECT * FROM remote('lost_host', 'system', 'one')",
                         "0")
    assert_eq_with_retry(node4, "SELECT * FROM distributed_lost_host", "0")

    assert TSV(
        node4.query(
            "SELECT DISTINCT host_name, host_address FROM system.clusters WHERE cluster='lost_host_cluster'"
        )) == TSV("lost_host\t127.0.0.1\n")
    assert TSV(node4.query("SELECT hostName()")) == TSV("node4")
Code example #24
def test_reload_users_xml_by_timer():
    check_system_quotas(
        [
            [
                "myQuota",
                "e651da9c-a748-8703-061a-7e5e5096dae7",
                "users.xml",
                "['user_name']",
                "[31556952]",
                0,
                "['default']",
                "[]",
            ]
        ]
    )
    system_quota_limits(
        [
            [
                "myQuota",
                31556952,
                0,
                1000,
                500,
                500,
                "\\N",
                "\\N",
                "\\N",
                1000,
                "\\N",
                "\\N",
                "\\N",
            ]
        ]
    )

    time.sleep(1)  # The modification time of the 'quota.xml' file should be different,
    # because config files are reloaded by timer only when the modification time has changed.
    copy_quota_xml("tiny_limits.xml", reload_immediately=False)
    assert_eq_with_retry(
        instance,
        "SELECT * FROM system.quotas",
        [
            [
                "myQuota",
                "e651da9c-a748-8703-061a-7e5e5096dae7",
                "users.xml",
                ["user_name"],
                "[31556952]",
                0,
                "['default']",
                "[]",
            ]
        ],
    )
    assert_eq_with_retry(
        instance,
        "SELECT * FROM system.quota_limits",
        [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N", "\\N"]],
    )
Code example #25
    def restore_clickhouse(self, retries=100):
        pid = self.get_process_pid("clickhouse")
        if pid:
            raise Exception("ClickHouse has already started")
        self.exec_in_container(["bash", "-c", "{} --daemon".format(CLICKHOUSE_START_COMMAND)], user=str(os.getuid()))
        from helpers.test_tools import assert_eq_with_retry
        # wait start
        assert_eq_with_retry(self, "select 1", "1", retry_count=retries)
Code example #26
File: test.py Project: zxshinxz/ClickHouse
def test_insert_clamps_settings():
    node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")
    node2.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999")

    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-20'), 2, 2)")
    distributed.query("INSERT INTO proxy VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000})
    distributed.query("SYSTEM FLUSH DISTRIBUTED proxy")
    assert_eq_with_retry(distributed, "SELECT COUNT() FROM proxy", "4")
Code example #27
def test_lost_part_same_replica(start_cluster):
    for node in [node1, node2]:
        node.query(
            "CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple() PARTITION BY date"
            .format(node.name))

    node1.query("SYSTEM STOP MERGES mt0")
    node2.query("SYSTEM STOP REPLICATION QUEUES")

    for i in range(5):
        node1.query(
            "INSERT INTO mt0 VALUES ({}, toDate('2020-10-01'))".format(i))

    for i in range(20):
        parts_to_merge = node1.query(
            "SELECT parts_to_merge FROM system.replication_queue")
        if parts_to_merge:
            parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
            print("Got parts list", parts_list)
            if len(parts_list) < 3:
                raise Exception(
                    "Got too small parts list {}".format(parts_list))
            break
        time.sleep(1)

    victim_part_from_the_middle = random.choice(parts_list[1:-1])
    print("Will corrupt part", victim_part_from_the_middle)

    remove_part_from_disk(node1, "mt0", victim_part_from_the_middle)

    node1.query("DETACH TABLE mt0")

    node1.query("ATTACH TABLE mt0")

    node1.query("SYSTEM START MERGES mt0")

    for i in range(10):
        result = node1.query("SELECT count() FROM system.replication_queue")
        if int(result) == 0:
            break
        time.sleep(1)
    else:
        assert False, "Still have something in replication queue:\n" + node1.query(
            "SELECT count() FROM system.replication_queue FORMAT Vertical")

    assert node1.contains_in_log(
        "Created empty part"
    ), "Seems like empty part {} is not created or log message changed".format(
        victim_part_from_the_middle)

    assert node1.query("SELECT COUNT() FROM mt0") == "4\n"

    node2.query("SYSTEM START REPLICATION QUEUES")

    assert_eq_with_retry(node2, "SELECT COUNT() FROM mt0", "4")
    assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue",
                         "0")
Code example #28
    def restart_clickhouse(self, stop_start_wait_sec=5, kill=False):
        if not self.stay_alive:
            raise Exception("clickhouse can be restarted only with stay_alive=True instance")

        self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='******')
        time.sleep(stop_start_wait_sec)
        self.exec_in_container(["bash", "-c", "{} --daemon".format(CLICKHOUSE_START_COMMAND)], user=str(os.getuid()))
        # wait start
        from helpers.test_tools import assert_eq_with_retry
        assert_eq_with_retry(self, "select 1", "1", retry_count=int(stop_start_wait_sec / 0.5), sleep_time=0.5)
Code example #29
File: test.py Project: wwjiang007/ClickHouse
def test_in_memory_wal_rotate(start_cluster):
    # Write every part to a single WAL file
    node11.query(
        "ALTER TABLE restore_table MODIFY SETTING write_ahead_log_max_bytes = 10"
    )
    for i in range(5):
        insert_random_data("restore_table", node11, 50)

    for i in range(5):
        # Check file exists
        node11.exec_in_container([
            "bash",
            "-c",
            "test -f /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin"
            .format(i),
        ])

    for node in [node11, node12]:
        node.query(
            "ALTER TABLE restore_table MODIFY SETTING number_of_free_entries_in_pool_to_lower_max_size_of_merge = 0"
        )
        node.query(
            "ALTER TABLE restore_table MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 10000000"
        )

    assert_eq_with_retry(
        node11,
        "OPTIMIZE TABLE restore_table FINAL SETTINGS optimize_throw_if_noop = 1",
        "",
    )
    # Restart to be sure that the task clearing stale logs has run
    node11.restart_clickhouse(kill=True)

    for i in range(5):
        # check file doesn't exist
        node11.exec_in_container([
            "bash",
            "-c",
            "test ! -e /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin"
            .format(i),
        ])

    # A new WAL file was created and is ready for new parts
    # Check file exists
    node11.exec_in_container([
        "bash", "-c",
        "test -f /var/lib/clickhouse/data/default/restore_table/wal.bin"
    ])
    # Check the file is empty
    node11.exec_in_container([
        "bash",
        "-c",
        "test ! -s /var/lib/clickhouse/data/default/restore_table/wal.bin",
    ])
Code example #30
def test_distributed_query_initiator_is_older_than_shard(setup_nodes):
    distributed_query_initiator_old_nodes = [node18_14, node19_13, node19_16]
    shard = new_node
    for i, initiator in enumerate(distributed_query_initiator_old_nodes):
        initiator.query("INSERT INTO dist_table VALUES (3, {})".format(i))

    assert_eq_with_retry(shard, "SELECT COUNT() FROM test_table WHERE id=3",
                         str(len(distributed_query_initiator_old_nodes)))
    assert_eq_with_retry(initiator,
                         "SELECT COUNT() FROM dist_table WHERE id=3",
                         str(len(distributed_query_initiator_old_nodes)))
Code example #31
File: test.py Project: chipitsine/ClickHouse
def test_recovery(start_cluster):
    node1.query("INSERT INTO test_table VALUES (1, 1)")
    time.sleep(1)
    node2.query("DETACH TABLE test_table")

    for i in range(100):
        node1.query("INSERT INTO test_table VALUES (1, {})".format(i))

    node2.query_with_retry("ATTACH TABLE test_table", check_callback=lambda x: len(node2.query("select * from test_table")) > 0)

    assert_eq_with_retry(node2, "SELECT count(*) FROM test_table", node1.query("SELECT count(*) FROM test_table"))
Code example #32
File: test.py Project: chipitsine/ClickHouse
def test_normal_work(normal_work):
    node1.query("insert into test_table values ('2017-06-16', 111, 0)")
    node1.query("insert into real_table values ('2017-06-16', 222, 0)")

    assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node1, "SELECT id FROM real_table order by id", '222')
    assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '111')

    node1.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table")

    assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '222')
    assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '222')
Code example #33
File: test.py Project: chipitsine/ClickHouse
def test_single_endpoint_connections_count(start_small_cluster):

    def task(count):
        print("Inserting ten times from {}".format(count))
        for i in range(count, count + 10):
            node1.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

    p = Pool(10)
    p.map(task, range(0, 100, 10))

    assert_eq_with_retry(node1, "select count() from test_table", "100")
    assert_eq_with_retry(node2, "select count() from test_table", "100")

    assert node2.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '1\n'
Code example #34
File: test.py Project: shenqsdev/ClickHouse
def test_keepalive_timeout(start_small_cluster):
    current_count = int(node1.query("select count() from test_table").strip())
    node1.query("insert into test_table values ('2017-06-16', 777, 0)")
    assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 1))
    # The server keepAliveTimeout is 3 seconds and the default client session timeout is 8,
    # so let's sleep in that interval
    time.sleep(4)

    node1.query("insert into test_table values ('2017-06-16', 888, 0)")

    time.sleep(3)

    assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 2))

    assert not node2.contains_in_log("No message received"), "Found 'No message received' in clickhouse-server.log"
Code example #35
File: test.py Project: chipitsine/ClickHouse
def test_both_http(both_http_cluster):
    node3.query("insert into test_table values ('2017-06-16', 111, 0)")

    assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111')

    node4.query("insert into test_table values ('2017-06-17', 222, 1)")

    assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111\n222')
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111\n222')
Code example #36
File: test.py Project: chipitsine/ClickHouse
def test_mixed_protocol(mixed_protocol_cluster):
    node5.query("insert into test_table values ('2017-06-16', 111, 0)")

    assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '')

    node6.query("insert into test_table values ('2017-06-17', 222, 1)")

    assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '222')
Code example #37
File: test.py Project: chipitsine/ClickHouse
def test_multiple_endpoint_connections_count(start_big_cluster):

    def task(count):
        print("Inserting ten times from {}".format(count))
        if (count // 10) % 2 == 1:
            node = node3
        else:
            node = node4

        for i in range(count, count + 10):
            node.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))

    p = Pool(10)
    p.map(task, range(0, 100, 10))

    assert_eq_with_retry(node3, "select count() from test_table", "100")
    assert_eq_with_retry(node4, "select count() from test_table", "100")
    assert_eq_with_retry(node5, "select count() from test_table", "100")

    # two per host
    assert node5.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '4\n'
Code example #38
File: test.py Project: chipitsine/ClickHouse
def test_alter_table_drop_partition(started_cluster, table, query, expected, n1, n2):
    to_insert = '''\
2017-06-16	111	0
2017-06-16	222	1
2017-06-16	333	2
2017-07-16	444	3
'''
    n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user='******')

    assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), '4', user='******')
    assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), '4', user='******')

    ### It may be the leader and everything will be ok
    n1.query(query, user='******')

    assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), expected, user='******')
    assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), expected, user='******')

    n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user='******')

    assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), '4', user='******')
    assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), '4', user='******')

    ### If node1 is the leader then node2 will be the follower
    n2.query(query, user='******')

    assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), expected, user='******')
    assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), expected, user='******')
Code example #39
File: test.py Project: chipitsine/ClickHouse
def test(started_cluster):
    # Check that the data has been inserted into correct tables.
    assert_eq_with_retry(node1, "SELECT id FROM shard_0.replicated", '111')
    assert_eq_with_retry(node1, "SELECT id FROM shard_2.replicated", '333')

    assert_eq_with_retry(node2, "SELECT id FROM shard_0.replicated", '111')
    assert_eq_with_retry(node2, "SELECT id FROM shard_1.replicated", '222')

    assert_eq_with_retry(node3, "SELECT id FROM shard_1.replicated", '222')
    assert_eq_with_retry(node3, "SELECT id FROM shard_2.replicated", '333')

    # Check that SELECT from the Distributed table works.
    expected_from_distributed = '''\
2017-06-16	111	0
2017-06-16	222	1
2017-06-16	333	2
'''
    assert_eq_with_retry(node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
    assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
    assert_eq_with_retry(node3, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)

    # Now isolate node3 from other nodes and check that SELECTs on other nodes still work.
    with PartitionManager() as pm:
        pm.partition_instances(node3, node1, action='REJECT --reject-with tcp-reset')
        pm.partition_instances(node3, node2, action='REJECT --reject-with tcp-reset')

        assert_eq_with_retry(node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)
        assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed)

        with pytest.raises(Exception):
            print node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5)
Code example #40
File: test.py Project: chipitsine/ClickHouse
def test_drop_failover(drop_failover):
    node3.query("insert into test_table values ('2017-06-16', 111, 0)")
    node3.query("insert into real_table values ('2017-06-16', 222, 0)")

    assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node3, "SELECT id FROM real_table order by id", '222')
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111')


    with PartitionManager() as pm:
        # Hinder replication between replicas
        pm.partition_instances(node3, node4, port=9009)
        # Disconnect Node4 from zookeeper
        pm.drop_instance_zk_connections(node4)

        node3.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table")

        # Node3 replace is ok
        assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '222')
        # Network interrupted -- the replace has not been applied on node4 yet, which is expected
        assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111')

        # Drop partition on source node
        node3.query("ALTER TABLE test_table DROP PARTITION 201706")

    # connection restored

    node4.query_with_retry("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", check_callback=lambda x: 'Not found part' not in x, sleep_time=1)
    assert 'Not found part' not in node4.query("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'")
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '')