Example #1
def setup_module(module):
    global DICTIONARIES
    global cluster
    global node

    dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
    for f in os.listdir(dict_configs_path):
        os.remove(os.path.join(dict_configs_path, f))

    for layout in LAYOUTS:
        for source in SOURCES:
            if source.compatible_with_layout(layout):
                structure = DictionaryStructure(layout, FIELDS[layout.layout_type])
                dict_name = source.name + "_" + layout.name
                dict_path = os.path.join(dict_configs_path, dict_name + '.xml')
                dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name)
                dictionary.generate_config()
                DICTIONARIES.append(dictionary)
            else:
                print "Source", source.name, "incompatible with layout", layout.name

    main_configs = []
    for fname in os.listdir(dict_configs_path):
        main_configs.append(os.path.join(dict_configs_path, fname))
    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    node = cluster.add_instance('node', main_configs=main_configs, with_mysql=True, with_mongo=True)
    cluster.add_instance('clickhouse1')
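The snippet above only constructs the cluster and its instances; starting and tearing it down is not shown. A minimal companion fixture, sketched here as an assumption (it is not part of the original snippet), would reuse the `cluster` global prepared in `setup_module()`:

import pytest


@pytest.fixture(scope="module")
def started_cluster():
    # Assumes the `cluster` global prepared in setup_module() above.
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()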
Example #2
def setup_module(module):
    global cluster
    global instance
    global test_table

    structure = generate_structure()
    dictionary_files = generate_dictionaries(os.path.join(SCRIPT_DIR, 'configs/dictionaries'), structure)

    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    instance = cluster.add_instance('instance', main_configs=dictionary_files)
    test_table = DictionaryTestTable(os.path.join(SCRIPT_DIR, 'configs/dictionaries/source.tsv'))
Example #3
def started_cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        instance = cluster.add_instance('dummy', clickhouse_path_dir='clickhouse_path')
        cluster.start()

        cluster_fail = ClickHouseCluster(__file__, name='fail')
        instance_fail = cluster_fail.add_instance('dummy_fail', clickhouse_path_dir='clickhouse_path_fail')
        with pytest.raises(Exception):
            cluster_fail.start()

        yield cluster

    finally:
        cluster.shutdown()
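In the surrounding test files this generator is registered as a module-scoped fixture. A hypothetical consumer is sketched below for illustration only; the query and assertion are assumptions:

def test_smoke(started_cluster):
    # Assumes started_cluster is wrapped with @pytest.fixture(scope="module") as in the other examples.
    instance = started_cluster.instances['dummy']
    assert instance.query('SELECT 1') == '1\n'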
Example #4
def started_cluster():
    global cluster
    try:
        clusters_schema = {
         "0" : {
            "0" : ["0", "1"],
            "1" : ["0"]
         },
         "1" : {
            "0" : ["0", "1"],
            "1" : ["0"]
         }
        }

        cluster = ClickHouseCluster(__file__)

        for cluster_name, shards in clusters_schema.items():
            for shard_name, replicas in shards.items():
                for replica_name in replicas:
                    name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
                    cluster.add_instance(name,
                        config_dir="configs",
                        macroses={"cluster": cluster_name, "shard": shard_name, "replica": replica_name},
                        with_zookeeper=True)

        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()
Example #5
def test_different_user():
    current_user_id = os.getuid()

    if current_user_id != 0:
        return

    other_user_id = pwd.getpwnam('nobody').pw_uid

    cluster = ClickHouseCluster(__file__)
    node = cluster.add_instance('node')

    cluster.start()

    docker_api = docker.from_env().api
    container = node.get_docker_handle()
    container.stop()
    container.start()
    container.exec_run('chown {} /var/lib/clickhouse'.format(other_user_id), privileged=True)
    container.exec_run(CLICKHOUSE_START_COMMAND)

    cluster.shutdown() # cleanup

    with open(os.path.join(node.path, 'logs/clickhouse-server.err.log')) as log:
        expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\."
        last_message = log.readlines()[-1].strip()

        if re.search(expected_message, last_message) is None:
            pytest.fail('Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, last_message))
Example #6
def started_cluster():
    global cluster
    global instance
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance('ch1', config_dir="configs")
        cluster.start()

        instance = cluster.instances['ch1']
        instance.query('CREATE DATABASE dictionaries ENGINE = Dictionary')
        instance.query('CREATE TABLE dictionary_source (id UInt64, value UInt8) ENGINE = Memory')
        #print(instance.query('SELECT * FROM system.dictionaries FORMAT Vertical'))
        print("Started ", instance.ip_address)

        yield cluster

    finally:
        cluster.shutdown()
Example #7
        node.query('''
                CREATE DATABASE test;
    
                CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
                PARTITION BY date
                ORDER BY id
                SETTINGS
                    replicated_max_parallel_fetches_for_host={connections},
                    index_granularity=8192;
            '''.format(shard=shard,
                       replica=node.name,
                       connections=connections_count))


cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
                             user_configs=[],
                             main_configs=['configs/remote_servers.xml'],
                             with_zookeeper=True)
node2 = cluster.add_instance('node2',
                             user_configs=[],
                             main_configs=['configs/remote_servers.xml'],
                             with_zookeeper=True)


@pytest.fixture(scope="module")
def start_small_cluster():
    try:
        cluster.start()
Example #8
def test_chroot_with_different_root():
    cluster_1 = ClickHouseCluster(
        __file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(
        __file__, zookeeper_config_path='configs/zookeeper_config_root_b.xml')

    node1 = cluster_1.add_instance('node1',
                                   main_configs=[
                                       "configs/remote_servers.xml",
                                       "configs/zookeeper_config_root_a.xml"
                                   ],
                                   with_zookeeper=True,
                                   zookeeper_use_tmpfs=False)
    node2 = cluster_2.add_instance('node2',
                                   main_configs=[
                                       "configs/remote_servers.xml",
                                       "configs/zookeeper_config_root_b.xml"
                                   ],
                                   with_zookeeper=True,
                                   zookeeper_use_tmpfs=False)
    nodes = [node1, node2]

    def create_zk_roots(zk):
        zk.ensure_path('/root_a')
        zk.ensure_path('/root_b')
        print(zk.get_children('/'))

    cluster_1.add_zookeeper_startup_command(create_zk_roots)

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)

            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                for j in range(2):  # Second insert to test deduplication
                    node.query(
                        "INSERT INTO simple VALUES ({0}, {0})".format(i))

            assert node1.query('select count() from simple').strip() == '1'
            assert node2.query('select count() from simple').strip() == '1'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()
Example #9
gen_dir = os.path.join(SCRIPT_DIR, './_gen')
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    'python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
    {proto_dir}/clickhouse_grpc.proto'.format(proto_dir=proto_dir,
                                              gen_dir=gen_dir),
    shell=True)

sys.path.append(gen_dir)
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc

# Utilities

config_dir = os.path.join(SCRIPT_DIR, './configs')
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml'])
grpc_port = 9100
main_channel = None


def create_channel():
    node_ip_with_grpc_port = cluster.get_instance_ip('node') + ':' + str(
        grpc_port)
    channel = grpc.insecure_channel(node_ip_with_grpc_port)
    grpc.channel_ready_future(channel).result(timeout=10)
    global main_channel
    if not main_channel:
        main_channel = channel
    return channel
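A sketch of how the generated stubs might be combined with create_channel() to run a query. The service, message, and field names (ClickHouseStub, QueryInfo, output) are assumptions that should be checked against the generated clickhouse_grpc modules:

def query_via_grpc(sql):
    # Assumed API of the generated gRPC modules; verify against clickhouse_grpc.proto.
    channel = create_channel()
    stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel)
    result = stub.ExecuteQuery(clickhouse_grpc_pb2.QueryInfo(query=sql))
    return result.output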
Example #10
import time
import os
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from helpers.client import CommandRequest


cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 1})
node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 2})
nodes = [node1, node2]

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_random_inserts(started_cluster):
    # Duration of the test; reduce it if you don't want to wait
Example #11
def test_identity():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
    cluster_2 = ClickHouseCluster(__file__)

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)

    try:
        cluster_1.start()

        node1.query('''
        CREATE TABLE simple (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
        '''.format(replica=node1.name))

        with pytest.raises(Exception):
            cluster_2.start(destroy_dirs=False)
            node2.query('''
            CREATE TABLE simple (date Date, id UInt32) 
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192);
            ''')

    finally:
        cluster_1.shutdown()
        cluster_2.shutdown()
Example #12
import time
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster

    except Exception as ex:
        print(ex)

    finally:
        cluster.shutdown()

def drop_table(nodes, table_name):
    for node in nodes:
        node.query("DROP TABLE IF EXISTS {}".format(table_name))

def test_ttl_columns(start_cluster):
    drop_table([node1, node2], "test_ttl")
    for node in [node1, node2]:
        node.query(
Example #13
import pytest
import os
from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=[
        "configs/first.crt",
        "configs/first.key",
        "configs/second.crt",
        "configs/second.key",
        "configs/ECcert.crt",
        "configs/ECcert.key",
        "configs/WithPassPhrase.crt",
        "configs/WithPassPhrase.key",
        "configs/cert.xml",
    ],
)
PASS_PHRASE_TEMPLATE = """<privateKeyPassphraseHandler>
                <name>KeyFileHandler</name>
                <options>
                <password>{pass_phrase}</password>
                </options>
            </privateKeyPassphraseHandler>
"""


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
Example #14
#!/usr/bin/env python3
import time

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True)
node2 = cluster.add_instance("node2", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True)
node3 = cluster.add_instance("node3", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()

def test_ttl_move_and_s3(started_cluster):
    for i, node in enumerate([node1, node2, node3]):
        node.query(
            """
            CREATE TABLE s3_test_with_ttl (date DateTime, id UInt32, value String)
            ENGINE=ReplicatedMergeTree('/clickhouse/tables/s3_test', '{}')
            ORDER BY id
            PARTITION BY id
            TTL date TO DISK 's3_disk'
            SETTINGS storage_policy='s3_and_default'
            """.format(i))
Example #15
import time
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', user_configs=['configs/config_no_substs.xml']) # hardcoded value 33333
node2 = cluster.add_instance('node2', user_configs=['configs/config_env.xml'], env_variables={"MAX_QUERY_SIZE": "55555"})
node3 = cluster.add_instance('node3', user_configs=['configs/config_zk.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', user_configs=['configs/config_incl.xml'], main_configs=['configs/max_query_size.xml']) # include value 77777
node5 = cluster.add_instance('node5', user_configs=['configs/config_allow_databases.xml'])
node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from_env.xml'], env_variables={"INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/max_query_size.xml"}, main_configs=['configs/max_query_size.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        def create_zk_roots(zk):
            zk.create(path="/setting/max_query_size", value="77777", makepath=True)
        cluster.add_zookeeper_startup_command(create_zk_roots)

        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def test_config(start_cluster):
   assert node1.query("select value from system.settings where name = 'max_query_size'") == "33333\n"
   assert node2.query("select value from system.settings where name = 'max_query_size'") == "55555\n"
   assert node3.query("select value from system.settings where name = 'max_query_size'") == "77777\n"
   assert node4.query("select value from system.settings where name = 'max_query_size'") == "99999\n"
   assert node6.query("select value from system.settings where name = 'max_query_size'") == "99999\n"
Example #16
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1')
node2 = cluster.add_instance('node2')


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        for node in [node1, node2]:
            node.query("""
                CREATE TABLE test_table(
                    APIKey UInt32,
                    CustomAttributeId UInt64,
                    ProfileIDHash UInt64,
                    DeviceIDHash UInt64,
                    Data String)
                ENGINE = SummingMergeTree()
                ORDER BY (APIKey, CustomAttributeId, ProfileIDHash, DeviceIDHash, intHash32(DeviceIDHash))
            """)
        yield cluster

    finally:
        cluster.shutdown()

Example #17
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance("node",
                            main_configs=["configs/config.xml"],
                            with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def create_force_drop_flag(node):
    force_drop_flag_path = "/var/lib/clickhouse/flags/force_drop_table"
    node.exec_in_container(
        [
            "bash",
            "-c",
            "touch {} && chmod a=rw {}".format(force_drop_flag_path,
                                               force_drop_flag_path),
        ],
        user="******",
    )
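An illustrative use of the helper above: the flag allows the server to drop a table that would otherwise be rejected by max_table_size_to_drop. The helper below and its table name are assumptions, not part of the original test:

def drop_large_table(node, table_name):
    # Create the flag first, then drop the oversized table.
    create_force_drop_flag(node)
    node.query("DROP TABLE {} SYNC".format(table_name))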
Example #18
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
q = instance.query
path_to_data = '/var/lib/clickhouse/'


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        q('CREATE DATABASE test ENGINE = Ordinary')

        yield cluster

    finally:
        cluster.shutdown()


@pytest.fixture
def partition_table_simple(started_cluster):
    q("DROP TABLE IF EXISTS test.partition")
    q("CREATE TABLE test.partition (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) "
      "ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) "
      "SETTINGS index_granularity=8192, index_granularity_bytes=0")
    q("INSERT INTO test.partition ( x ) VALUES ( now() )")
    q("INSERT INTO test.partition ( x ) VALUES ( now()+1 )")
Example #19
import os
import time

import pytest
from helpers.client import QueryTimeoutExceedException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DICTIONARY_FILES = [
    'configs/dictionaries/cache_xypairs.xml',
    'configs/dictionaries/executable.xml', 'configs/dictionaries/file.xml',
    'configs/dictionaries/file.txt', 'configs/dictionaries/slow.xml'
]

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', dictionaries=DICTIONARY_FILES)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query("CREATE DATABASE IF NOT EXISTS test")

        yield cluster

    finally:
        cluster.shutdown()

Example #20
## sudo -H pip install PyMySQL
import pymysql.cursors
import pytest
from helpers.cluster import ClickHouseCluster
import time
import logging

DICTS = [
    'configs/dictionaries/mysql_dict1.xml',
    'configs/dictionaries/mysql_dict2.xml'
]
CONFIG_FILES = ['configs/remote_servers.xml']
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=CONFIG_FILES,
                                with_mysql=True,
                                dictionaries=DICTS)

create_table_mysql_template = """
    CREATE TABLE IF NOT EXISTS `test`.`{}` (
        `id` int(11) NOT NULL,
        `value` varchar(50) NOT NULL,
        PRIMARY KEY (`id`)
    ) ENGINE=InnoDB;
    """

create_clickhouse_dictionary_table_template = """
    CREATE TABLE IF NOT EXISTS `test`.`dict_table_{}` (`id` UInt64, `value` String) ENGINE = Dictionary({})
    """

Example #21
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)

instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_configs=['configs/remote_servers.xml'])
instance_test_inserts_batching = cluster.add_instance(
    'instance_test_inserts_batching',
    main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml'])
remote = cluster.add_instance('remote', user_configs=['configs/forbid_background_merges.xml'])

instance_test_inserts_local_cluster = cluster.add_instance(
    'instance_test_inserts_local_cluster',
    main_configs=['configs/remote_servers.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log")
        instance_test_reconnect.query('''
CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1')
''')
Example #22
import time

import psycopg2
import pymysql.cursors
import pytest
import logging
import os.path

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from multiprocessing.dummy import Pool

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    'node1',
    with_odbc_drivers=True,
    with_mysql=True,
    with_postgres=True,
    main_configs=['configs/openssl.xml', 'configs/odbc_logging.xml'],
    dictionaries=[
        'configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml',
        'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml',
        'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'
    ],
    stay_alive=True)

drop_table_sql_template = """
    DROP TABLE IF EXISTS `clickhouse`.`{}`
    """
Example #23
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

instance_with_dist_table = cluster.add_instance('instance_with_dist_table', main_configs=['configs/remote_servers.xml'])
replica1 = cluster.add_instance('replica1', with_zookeeper=True)
replica2 = cluster.add_instance('replica2', with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for replica in (replica1, replica2):
            replica.query(
                "CREATE TABLE replicated (d Date, x UInt32) ENGINE = "
                "ReplicatedMergeTree('/clickhouse/tables/replicated', '{instance}', d, d, 8192)")

        instance_with_dist_table.query(
            "CREATE TABLE distributed (d Date, x UInt32) ENGINE = "
            "Distributed('test_cluster', 'default', 'replicated')")

        yield cluster

    finally:
        cluster.shutdown()
Example #24
def started_cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
                             with_minio=True)
        cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
        cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
                             user_configs=["configs/s3_max_redirects.xml"])
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        prepare_s3_bucket(cluster)
        logging.info("S3 bucket created")
        run_s3_mocks(cluster)

        yield cluster
    finally:
        cluster.shutdown()
Example #25
def test_chroot_with_same_root():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    def create_zk_root(zk):
        zk.ensure_path('/root_a')
        print(zk.get_children('/'))
    cluster_1.add_zookeeper_startup_command(create_zk_root)

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)
            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                for j in range(2): # Second insert to test deduplication
                    node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            time.sleep(1)

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()
Example #26
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1',
                             image='yandex/clickhouse-server',
                             tag='20.8.11.17',
                             with_installed_binary=True,
                             stay_alive=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_default_codec_read(start_cluster):
    node1.query("DROP TABLE IF EXISTS test_18340")

    node1.query("""
        CREATE TABLE test_18340
        (
            `lns` LowCardinality(Nullable(String)),
Example #27
import pytest
import time
import os, sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import helpers

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with Distributed table.
# Thus we have a shard with a local replica and a shard with remote replicas.
node_1_1 = instance_with_dist_table = cluster.add_instance(
    'node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml'])
node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True)
node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True)
node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for shard in (1, 2):
            for replica in (1, 2):
                node = cluster.instances['node_{}_{}'.format(shard, replica)]
                node.query('''
CREATE TABLE replicated (d Date, x UInt32) ENGINE =
Example #28
import time

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)

instance_test_reconnect = cluster.add_instance(
    "instance_test_reconnect", main_configs=["configs/remote_servers.xml"])
instance_test_inserts_batching = cluster.add_instance(
    "instance_test_inserts_batching",
    main_configs=["configs/remote_servers.xml"],
    user_configs=["configs/enable_distributed_inserts_batching.xml"],
)
remote = cluster.add_instance(
    "remote", main_configs=["configs/forbid_background_merges.xml"])

instance_test_inserts_local_cluster = cluster.add_instance(
    "instance_test_inserts_local_cluster",
    main_configs=["configs/remote_servers.xml"])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log")
Example #29
import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()


        for node in [node1, node2]:
            node.query('''
            CREATE TABLE sometable(date Date, id UInt32, value Int32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '{replica}', date, id, 8192);
                '''.format(replica=node.name), user='******')

        for node in [node3, node4]:
            node.query('''
            CREATE TABLE someothertable(date Date, id UInt32, value Int32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/someothertable', '{replica}', date, id, 8192);
                '''.format(replica=node.name), user='******')
Example #30
import os
import sys
import time
import uuid

import docker
import psycopg2 as py_psql
import psycopg2.extras
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check

psycopg2.extras.register_uuid()

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DOCKER_COMPOSE_PATH = get_docker_compose_path()

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=[
        "configs/postresql.xml",
        "configs/log.xml",
        "configs/ssl_conf.xml",
        "configs/dhparam.pem",
        "configs/server.crt",
        "configs/server.key",
    ],
    user_configs=["configs/default_passwd.xml"],
    env_variables={"UBSAN_OPTIONS": "print_stacktrace=1"},
)

server_port = 5433
Example #31
def test_secure_connection():
    # We need absolute path in zookeeper volumes. Generate it dynamically.
    TEMPLATE = '''
    zoo{zoo_id}:
        image: zookeeper:3.6.2
        restart: always
        environment:
            ZOO_TICK_TIME: 500
            ZOO_MY_ID: {zoo_id}
            ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
            ZOO_SECURE_CLIENT_PORT: 2281
        volumes:
           - {helpers_dir}/zookeeper-ssl-entrypoint.sh:/zookeeper-ssl-entrypoint.sh
           - {configs_dir}:/clickhouse-config
        command: ["zkServer.sh", "start-foreground"]
        entrypoint: /zookeeper-ssl-entrypoint.sh
    '''
    configs_dir = p.abspath(p.join(p.dirname(__file__), 'configs_secure'))
    helpers_dir = p.abspath(p.dirname(helpers.__file__))

    cluster = ClickHouseCluster(
        __file__,
        zookeeper_config_path='configs/zookeeper_config_with_ssl.xml')

    docker_compose = NamedTemporaryFile(mode='w+', delete=False)

    docker_compose.write(
        "version: '2.3'\nservices:\n" + TEMPLATE.format(
            zoo_id=1, configs_dir=configs_dir, helpers_dir=helpers_dir) +
        TEMPLATE.format(
            zoo_id=2, configs_dir=configs_dir, helpers_dir=helpers_dir) +
        TEMPLATE.format(
            zoo_id=3, configs_dir=configs_dir, helpers_dir=helpers_dir))
    docker_compose.close()

    node1 = cluster.add_instance(
        'node1',
        main_configs=[
            "configs_secure/client.crt", "configs_secure/client.key",
            "configs_secure/conf.d/remote_servers.xml",
            "configs_secure/conf.d/ssl_conf.xml"
        ],
        with_zookeeper=True,
        zookeeper_docker_compose_path=docker_compose.name,
        zookeeper_use_tmpfs=False,
        use_keeper=False)
    node2 = cluster.add_instance(
        'node2',
        main_configs=[
            "configs_secure/client.crt", "configs_secure/client.key",
            "configs_secure/conf.d/remote_servers.xml",
            "configs_secure/conf.d/ssl_conf.xml"
        ],
        with_zookeeper=True,
        zookeeper_docker_compose_path=docker_compose.name,
        zookeeper_use_tmpfs=False,
        use_keeper=False)

    try:
        cluster.start()

        assert node1.query(
            "SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'
        assert node2.query(
            "SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n'

        kThreadsNumber = 16
        kIterations = 100
        threads = []
        for _ in range(kThreadsNumber):
            threads.append(
                threading.Thread(target=(lambda: [
                    node1.query(
                        "SELECT count() FROM system.zookeeper WHERE path = '/'"
                    ) for _ in range(kIterations)
                ])))

        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

    finally:
        cluster.shutdown()
        unlink(docker_compose.name)
Example #32
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    "node1",
    with_zookeeper=False,
    image="yandex/clickhouse-server",
    tag="19.16.9.37",
    stay_alive=True,
    with_installed_binary=True,
)
node2 = cluster.add_instance(
    "node2",
    with_zookeeper=False,
    image="yandex/clickhouse-server",
    tag="19.16.9.37",
    stay_alive=True,
    with_installed_binary=True,
)
node3 = cluster.add_instance("node3", with_zookeeper=False)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
Example #33
#!/usr/bin/env python3

import pytest
from helpers.cluster import ClickHouseCluster
import random
import string
import os
import time
from multiprocessing.dummy import Pool
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
from kazoo.client import KazooClient, KazooState

cluster = ClickHouseCluster(__file__)
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          'configs')

node1 = cluster.add_instance('node1',
                             main_configs=['configs/enable_keeper1.xml'],
                             stay_alive=True)
node2 = cluster.add_instance('node2',
                             main_configs=['configs/enable_keeper2.xml'],
                             stay_alive=True)
node3 = cluster.add_instance('node3',
                             main_configs=['configs/enable_keeper3.xml'],
                             stay_alive=True)
node4 = cluster.add_instance('node4', stay_alive=True)

Example #34
# pylint: disable=line-too-long
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__, name="skipping_indices")
node = cluster.add_instance('node',
                            image='yandex/clickhouse-server',
                            tag='21.6',
                            stay_alive=True,
                            with_installed_binary=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


# TODO: cover other types too, but for this we need to add something like
# restart_with_tagged_version(), since right now it is not possible to
# switch to old tagged clickhouse version.
def test_index(start_cluster):
    node.query("""
    CREATE TABLE data
Example #35
import os
import subprocess
import time

import pymysql.cursors
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path

from . import materialize_with_ddl

DOCKER_COMPOSE_PATH = get_docker_compose_path()

cluster = ClickHouseCluster(__file__)
clickhouse_node = cluster.add_instance('node1', user_configs=["configs/users.xml"], with_mysql=False)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


class MySQLNodeInstance:
    def __init__(self, user='******', password='******', hostname='127.0.0.1', port=3308):
        self.user = user
        self.port = port
        self.hostname = hostname
        self.password = password
Example #36
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', with_zookeeper=True)


@pytest.fixture(scope='module', autouse=True)
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_replicated_engine_parse_metadata_on_attach():
    node.query('''
        CREATE TABLE data (
            key Int,
            INDEX key_idx0 key+0 TYPE minmax GRANULARITY 1,
            INDEX key_idx1 key+1 TYPE minmax GRANULARITY 1
        )
        ENGINE = ReplicatedMergeTree('/ch/tables/default/data', 'node')
        ORDER BY key;
        ''')
    node.query('DETACH TABLE data')

    zk = cluster.get_kazoo_client('zoo1')
    # Add **extra space between indices**, to check that it will be re-parsed
Example #37
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

instance_with_dist_table = cluster.add_instance('instance_with_dist_table', main_configs=['configs/remote_servers.xml'])
remote = cluster.add_instance('remote')

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local (x UInt32) ENGINE = Log")

        instance_with_dist_table.query('''
CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local')
''')

        yield cluster

    finally:
        cluster.shutdown()


def test_reconnect(started_cluster):
    with PartitionManager() as pm:
Example #38
import time
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", with_zookeeper=True)
node3 = cluster.add_instance('node3', config_dir="configs", with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", with_zookeeper=True)

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        for node, shard in [(node1, 1), (node2, 1), (node3, 2), (node4, 2)]:
            node.query(
            '''
                CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
                PARTITION BY date
                ORDER BY id
            '''.format(shard=shard, replica=node.name), settings={"password": "******"})

        yield cluster

    finally:
        cluster.shutdown()

def test_truncate(start_cluster):
    node1.query("insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", settings={"password": "******"})
Example #39
"""
protoc --version
libprotoc 3.0.0

# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import kafka_pb2


# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for mat. view is working.
# TODO: add test for SELECT LIMIT is working.
# TODO: modify tests to respect `skip_broken_messages` setting.

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=['configs/kafka.xml'],
                                with_kafka=True,
                                clickhouse_path_dir='clickhouse_path')
kafka_id = ''


# Helpers

def check_kafka_is_available():
    p = subprocess.Popen(('docker',
                          'exec',
                          '-i',
                          kafka_id,
                          '/usr/bin/kafka-broker-api-versions',
Example #40
import time
import pytest

import os
import pymysql.cursors
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, image='alesapin/ubuntu_with_odbc', main_configs=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True)

create_table_sql_template =   """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int  NOT NULL default 0,
    `money` int NOT NULL default 0,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """
def get_mysql_conn():
    conn = pymysql.connect(user='******', password='******', host='127.0.0.1', port=3308)
    return conn

def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute(
            "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
Example #41
import time
import pytest
import string
import random

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'])
node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_preconfigured_default_codec(start_cluster):
    for node in [node1, node2]:
        node.query("""
        CREATE TABLE compression_codec_multiple_with_key (
            somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
            id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
            data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4),
            somecolumn Float64
Example #42
import os.path as p
import time
import datetime
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', main_configs=['configs/graphite_rollup.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query('CREATE DATABASE test')

        yield cluster

    finally:
        cluster.shutdown()

@pytest.fixture
def graphite_table(started_cluster):
    instance.query('''
DROP TABLE IF EXISTS test.graphite;
CREATE TABLE test.graphite
    (metric String, value Float64, timestamp UInt32, date Date, updated UInt32)
    ENGINE = GraphiteMergeTree(date, (metric, timestamp), 8192, 'graphite_rollup');
''')
Example #43
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                config_dir="configs")




@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_read_only_constraint(started_cluster):
    # Change a setting for session with SET.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"

    expected_error = "Setting force_index_by_date should not be changed"
    assert expected_error in instance.query_and_get_error("SET force_index_by_date=1")

    # Change a setting for query with SETTINGS.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"
Example #44
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                clickhouse_path_dir='clickhouse_path')


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query('CREATE DATABASE test')
        yield cluster

    finally:
        cluster.shutdown()


def create_simple_table():
    instance.query("DROP TABLE IF EXISTS test.simple")
    instance.query('''
        CREATE TABLE test.simple (key UInt64, value String)
            ENGINE = MergeTree ORDER BY tuple();
        ''')


def test_protobuf_format_input(started_cluster):
    create_simple_table()
    instance.http_query(
        "INSERT INTO test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'",
Example #45
import pytest
import time
import os, sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import helpers

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager

cluster = ClickHouseCluster(__file__)

# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with Distributed table.
# Thus we have a shard with a local replica and a shard with remote replicas.
node_1_1 = instance_with_dist_table = cluster.add_instance(
    'node_1_1',
    with_zookeeper=True,
    main_configs=['configs/remote_servers.xml'])
node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True)
node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True)
node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for shard in (1, 2):
            for replica in (1, 2):
                node = cluster.instances['node_{}_{}'.format(shard, replica)]
Example #46
import os
import re
import time

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, TSV

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
    'instance',
    user_configs=[
        "configs/users.d/assign_myquota_to_default_user.xml",
        "configs/users.d/drop_default_quota.xml",
        "configs/users.d/myquota.xml", "configs/users.d/user_with_no_quota.xml"
    ])


def check_system_quotas(canonical):
    canonical_tsv = TSV(canonical)
    r = TSV(instance.query("SELECT * FROM system.quotas ORDER BY name"))
    print(("system_quotas: {},\ncanonical: {}".format(r, TSV(canonical_tsv))))
    assert r == canonical_tsv


def system_quota_limits(canonical):
    canonical_tsv = TSV(canonical)
    r = TSV(
        instance.query(
            "SELECT * FROM system.quota_limits ORDER BY quota_name, duration"))
    print(("system_quota_limits: {},\ncanonical: {}".format(
Example #47
from contextlib import contextmanager

import pytest

## sudo -H pip install PyMySQL
import pymysql.cursors

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql = True)
create_table_sql_template =   """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int  NOT NULL default 0,
    `money` int NOT NULL default 0,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        conn = get_mysql_conn()
        ## create mysql db and table
        create_mysql_db(conn, 'clickhouse')
        yield cluster
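Once the MySQL side is set up, a test could read such a table back from ClickHouse through the mysql() table function. This is only a sketch: the host and credentials below are placeholders, since the snippet masks the real values:

def select_count_via_mysql_function(table_name):
    # Placeholder host and credentials; substitute the values the cluster actually uses.
    return node1.query(
        "SELECT count() FROM mysql('mysql_host:3306', 'clickhouse', '{}', 'user', 'password')".format(table_name))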
Example #48
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1',
                             main_configs=['configs/remote_servers.xml'])
node2 = cluster.add_instance('node2',
                             main_configs=['configs/remote_servers.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in (node1, node2):
            node.query('''
CREATE TABLE local_table(id UInt32, val String) ENGINE = TinyLog;
''')

        node1.query("INSERT INTO local_table VALUES (1, 'node1')")
        node2.query("INSERT INTO local_table VALUES (2, 'node2')")

        node1.query('''
CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table);
CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distributed_table')
''')
Example #49
import time
import pytest
import requests
from tempfile import NamedTemporaryFile
from helpers.hdfs_api import HDFSApi

import os

from helpers.cluster import ClickHouseCluster
import subprocess


SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_hdfs=True, config_dir="configs", main_configs=['configs/log_conf.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        yield cluster

    except Exception as ex:
        print(ex)
        raise ex
    finally:
        cluster.shutdown()

def test_read_write_storage(started_cluster):
Example #50
import os
import os.path as p
import time
import pwd
import re
import pymysql.cursors
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
import docker
import logging

from . import materialize_with_ddl

DOCKER_COMPOSE_PATH = get_docker_compose_path()

cluster = ClickHouseCluster(__file__)
mysql_node = None
mysql8_node = None

node_db_ordinary = cluster.add_instance('node1',
                                        user_configs=["configs/users.xml"],
                                        with_mysql=True,
                                        stay_alive=True)
node_db_atomic = cluster.add_instance(
    'node2',
    user_configs=["configs/users_db_atomic.xml"],
    with_mysql8=True,
    stay_alive=True)
node_disable_bytes_settings = cluster.add_instance(
    'node3',
    user_configs=["configs/users_disable_bytes_settings.xml"],
Example #51
import pytest

from helpers.cluster import ClickHouseCluster


def _fill_nodes(nodes, shard, connections_count):
    for node in nodes:
        node.query(
        '''
            CREATE DATABASE test;

            CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
            PARTITION BY date
            ORDER BY id
            SETTINGS
                replicated_max_parallel_fetches_for_host={connections},
                index_granularity=8192;
        '''.format(shard=shard, replica=node.name, connections=connections_count))

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def start_small_cluster():
    try:
        cluster.start()

        _fill_nodes([node1, node2], 1, 1)

        yield cluster

    finally:
        cluster.shutdown()
Example #52
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance("node1",
                             main_configs=["configs/clusters.xml"],
                             with_zookeeper=True)
node2 = cluster.add_instance("node2",
                             main_configs=["configs/clusters.xml"],
                             with_zookeeper=True)
node3 = cluster.add_instance("node3",
                             main_configs=["configs/clusters.xml"],
                             with_zookeeper=True)
node4 = cluster.add_instance(
    "node4",
    main_configs=["configs/clusters.xml"],
    image="yandex/clickhouse-server",
    tag="21.5",
    with_zookeeper=True,
)


def insert_data(node, table_name):
    node.query("""INSERT INTO {}
                VALUES (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));"""
               .format(table_name))


@pytest.fixture(scope="module")
Example #53
import time
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        node_to_shards = [
            (node1, [0, 2]),
            (node2, [0, 1]),
            (node3, [1, 2]),
        ]

        for node, shards in node_to_shards:
            for shard in shards:
                node.query('''
CREATE DATABASE shard_{shard};
Example #54
import os
import shutil
import time
import re
import pytest
import threading

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
from helpers.network import PartitionManager

test_recover_staled_replica_run = 1

cluster = ClickHouseCluster(__file__)

main_node = cluster.add_instance(
    "main_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
    stay_alive=True,
    macros={
        "shard": 1,
        "replica": 1
    },
)
dummy_node = cluster.add_instance(
    "dummy_node",
    main_configs=["configs/config.xml"],
    user_configs=["configs/settings.xml"],
    with_zookeeper=True,
Example #55
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in (node1, node2):
            node.query('''
CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id;
CREATE TABLE local_table_2(id UInt32, val String) ENGINE = MergeTree ORDER BY id;
''')

        node1.query("INSERT INTO local_table VALUES (1, 'node1')")
        node2.query("INSERT INTO local_table VALUES (2, 'node2')")

        node1.query('''
CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table);
CREATE TABLE distributed_table_2(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table_2);
CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distributed_table')
''')
Example #56
import pytest
import psycopg2

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from helpers.postgres_utility import get_postgres_conn
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1",
                             main_configs=["configs/named_collections.xml"],
                             with_postgres=True)

postgres_table_template = """
    CREATE TABLE {} (
    id Integer NOT NULL, value Integer, PRIMARY KEY (id))
    """

postgres_drop_table_template = """
    DROP TABLE {}
    """


def create_postgres_db(cursor, name):
    cursor.execute("CREATE DATABASE {}".format(name))


def create_postgres_table(cursor, table_name):
    # database was specified in connection string
    cursor.execute(postgres_table_template.format(table_name))
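A short sketch tying the helpers above together. Obtaining the cursor (via get_postgres_conn) is omitted because its exact signature is not shown here; the table name and the INSERT are illustrative assumptions:

def prepare_postgres(cursor, table_name='test_table'):
    # Create the table from the template above and fill it with sample rows.
    create_postgres_table(cursor, table_name)
    cursor.execute("INSERT INTO {} SELECT i, i FROM generate_series(1, 100) AS i".format(table_name))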
Example #57
import time
import pytest
import string
import random

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'])
node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'])
node4 = cluster.add_instance('node4', user_configs=['configs/enable_uncompressed_cache.xml'])
node5 = cluster.add_instance('node5', main_configs=['configs/zstd_compression_by_default.xml'], user_configs=['configs/enable_uncompressed_cache.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_preconfigured_default_codec(start_cluster):
    for node in [node1, node2]:
        node.query("""
        CREATE TABLE compression_codec_multiple_with_key (
            somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
            id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
Example #58
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
# pylint: disable=line-too-long

import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node = cluster.add_instance("node",
                            main_configs=["configs/rocksdb.xml"],
                            stay_alive=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_valid_options(start_cluster):
    node.query("""
    CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key);
    DROP TABLE test;
    """)
Example #59
def test_identity():
    cluster_1 = ClickHouseCluster(
        __file__,
        zookeeper_config_path='configs/zookeeper_config_with_password.xml')
    cluster_2 = ClickHouseCluster(__file__)

    node1 = cluster_1.add_instance(
        'node1',
        main_configs=[
            "configs/remote_servers.xml",
            "configs/zookeeper_config_with_password.xml"
        ],
        with_zookeeper=True,
        zookeeper_use_tmpfs=False)
    node2 = cluster_2.add_instance('node2',
                                   main_configs=["configs/remote_servers.xml"],
                                   with_zookeeper=True,
                                   zookeeper_use_tmpfs=False)

    try:
        cluster_1.start()

        node1.query('''
        CREATE TABLE simple (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
        '''.format(replica=node1.name))

        with pytest.raises(Exception):
            cluster_2.start(destroy_dirs=False)
            node2.query('''
            CREATE TABLE simple (date Date, id UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192);
            ''')

    finally:
        cluster_1.shutdown()
        cluster_2.shutdown()
Example #60
def test_chroot_with_same_root():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    cluster_1.add_zookeeper_startup_command('create /root_a ""')
    cluster_1.add_zookeeper_startup_command('ls / ')

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)
            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32) 
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()