Example #1
def started_cluster():
    global cluster
    try:
        clusters_schema = {
         "0" : {
            "0" : ["0", "1"],
            "1" : ["0"]
         },
         "1" : {
            "0" : ["0", "1"],
            "1" : ["0"]
         }
        }

        cluster = ClickHouseCluster(__file__)

        for cluster_name, shards in clusters_schema.items():
            for shard_name, replicas in shards.items():
                for replica_name in replicas:
                    name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
                    cluster.add_instance(name,
                        config_dir="configs",
                        macroses={"cluster": cluster_name, "shard": shard_name, "replica": replica_name},
                        with_zookeeper=True)

        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()
Example #2
def setup_module(module):
    global DICTIONARIES
    global cluster
    global node

    dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
    for f in os.listdir(dict_configs_path):
        os.remove(os.path.join(dict_configs_path, f))

    for layout in LAYOUTS:
        for source in SOURCES:
            if source.compatible_with_layout(layout):
                structure = DictionaryStructure(layout, FIELDS[layout.layout_type])
                dict_name = source.name + "_" + layout.name
                dict_path = os.path.join(dict_configs_path, dict_name + '.xml')
                dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name)
                dictionary.generate_config()
                DICTIONARIES.append(dictionary)
            else:
                print "Source", source.name, "incompatible with layout", layout.name

    main_configs = []
    for fname in os.listdir(dict_configs_path):
        main_configs.append(os.path.join(dict_configs_path, fname))
    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    node = cluster.add_instance('node', main_configs=main_configs, with_mysql=True, with_mongo=True)
    cluster.add_instance('clickhouse1')
Example #3
def test_identity():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
    cluster_2 = ClickHouseCluster(__file__)

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)

    try:
        cluster_1.start()

        node1.query('''
        CREATE TABLE simple (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
        '''.format(replica=node1.name))

        with pytest.raises(Exception):
            cluster_2.start(destroy_dirs=False)
            node2.query('''
            CREATE TABLE simple (date Date, id UInt32) 
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192);
            ''')

    finally:
        cluster_1.shutdown()
        cluster_2.shutdown()
Example #4
def test_chroot_with_same_root():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    cluster_1.add_zookeeper_startup_command('create /root_a ""')
    cluster_1.add_zookeeper_startup_command('ls / ')

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)
            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32) 
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()
Example #5
def started_cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        instance = cluster.add_instance('dummy', clickhouse_path_dir='clickhouse_path')
        cluster.start()

        cluster_fail = ClickHouseCluster(__file__, name='fail')
        instance_fail = cluster_fail.add_instance('dummy_fail', clickhouse_path_dir='clickhouse_path_fail')
        with pytest.raises(Exception):
            cluster_fail.start()

        yield cluster

    finally:
        cluster.shutdown()
Example #6
def test_different_user():
    current_user_id = os.getuid()

    if current_user_id != 0:
        return

    other_user_id = pwd.getpwnam('nobody').pw_uid

    cluster = ClickHouseCluster(__file__)
    node = cluster.add_instance('node')

    cluster.start()

    docker_api = docker.from_env().api
    container = node.get_docker_handle()
    container.stop()
    container.start()
    container.exec_run('chown {} /var/lib/clickhouse'.format(other_user_id), privileged=True)
    container.exec_run(CLICKHOUSE_START_COMMAND)

    cluster.shutdown() # cleanup

    with open(os.path.join(node.path, 'logs/clickhouse-server.err.log')) as log:
        expected_message = r"Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\."
        last_message = log.readlines()[-1].strip()

        if re.search(expected_message, last_message) is None:
            pytest.fail('Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, last_message))
Example #7
def started_cluster():
    global cluster
    global instance
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance('ch1', config_dir="configs")
        cluster.start()

        instance = cluster.instances['ch1']
        instance.query('CREATE DATABASE dictionaries ENGINE = Dictionary')
        instance.query('CREATE TABLE dictionary_source (id UInt64, value UInt8) ENGINE = Memory')
        # print(instance.query('SELECT * FROM system.dictionaries FORMAT Vertical'))
        print("Started", instance.ip_address)

        yield cluster

    finally:
        cluster.shutdown()
Example #8
def setup_module(module):
    global cluster
    global instance
    global test_table

    structure = generate_structure()
    dictionary_files = generate_dictionaries(os.path.join(SCRIPT_DIR, 'configs/dictionaries'), structure)

    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    instance = cluster.add_instance('instance', main_configs=dictionary_files)
    test_table = DictionaryTestTable(os.path.join(SCRIPT_DIR, 'configs/dictionaries/source.tsv'))
Example #9
def test_chroot_with_same_root():

    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    def create_zk_root(zk):
        zk.ensure_path('/root_a')
        print(zk.get_children('/'))
    cluster_1.add_zookeeper_startup_command(create_zk_root)

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)
            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                for j in range(2): # Second insert to test deduplication
                    node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            time.sleep(1)

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()
Example #10
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)

instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_configs=['configs/remote_servers.xml'])
instance_test_inserts_batching = cluster.add_instance(
    'instance_test_inserts_batching',
    main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml'])
remote = cluster.add_instance('remote', user_configs=['configs/forbid_background_merges.xml'])

instance_test_inserts_local_cluster = cluster.add_instance(
    'instance_test_inserts_local_cluster',
    main_configs=['configs/remote_servers.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log")
        instance_test_reconnect.query('''
CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1')
''')
Example #11
import os
import time

import pytest
from helpers.cluster import ClickHouseCluster, get_instances_dir

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    'node', main_configs=["configs/max_table_size_to_drop.xml"])

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

CONFIG_PATH = os.path.join(
    SCRIPT_DIR, './{}/node/configs/config.d/max_table_size_to_drop.xml'.format(
        get_instances_dir()))


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        node.query(
            "CREATE TABLE test(date Date, id UInt32) ENGINE = MergeTree() PARTITION BY date ORDER BY id"
        )
        yield cluster
    finally:
        cluster.shutdown()


def test_reload_max_table_size_to_drop(start_cluster):
    node.query("INSERT INTO test VALUES (now(), 0)")
Example #12
            CREATE TABLE `{database}`.src (p UInt64, d UInt64)
            ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}')
            ORDER BY d PARTITION BY p
            SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
            CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
            ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
            ORDER BY d PARTITION BY p
            SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
        """.format(shard=shard,
                   replica=node.name,
                   database=CLICKHOUSE_DATABASE))


cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1",
                             main_configs=["configs/remote_servers.xml"],
                             with_zookeeper=True)
node2 = cluster.add_instance("node2",
                             main_configs=["configs/remote_servers.xml"],
                             with_zookeeper=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        initialize_database([node1, node2], 1)
        yield cluster
    except Exception as ex:
        print(ex)
    finally:
Example #13
import time
import os
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from helpers.client import CommandRequest


cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 1})
node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 2})
nodes = [node1, node2]

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_random_inserts(started_cluster):
    # Duration of the test; reduce it if you don't want to wait.
Example #14
libprotoc 3.0.0

# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import subprocess

from helpers.cluster import ClickHouseCluster

import kafka_pb2


# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for mat. view is working.
# TODO: add test for SELECT LIMIT is working.
# TODO: modify tests to respect `skip_broken_messages` setting.

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=['configs/kafka.xml'],
                                with_kafka=True,
                                clickhouse_path_dir='clickhouse_path')
kafka_id = ''


# Helpers

def check_kafka_is_available():
    p = subprocess.Popen(('docker',
                          'exec',
                          '-i',
                          kafka_id,
                          '/usr/bin/kafka-broker-api-versions',
                          '--bootstrap-server',
                          'INSIDE://localhost:9092'),
                         stdout=subprocess.PIPE)
Example #15
import time
import pytest

import os
import pymysql.cursors
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, image='alesapin/ubuntu_with_odbc', main_configs=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True)

create_table_sql_template = """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int  NOT NULL default 0,
    `money` int NOT NULL default 0,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """
def get_mysql_conn():
    conn = pymysql.connect(user='******', password='******', host='127.0.0.1', port=3308)
    return conn

def create_mysql_db(conn, name):
    with conn.cursor() as cursor:
        cursor.execute(
            "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
Example #16
import os
import pwd
import re
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', stay_alive=True)
other_user_id = pwd.getpwnam('nobody').pw_uid
current_user_id = os.getuid()

@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        if current_user_id != 0:
            return

        cluster.start()
        yield cluster

    finally:
        cluster.shutdown(ignore_fatal=True)


def test_different_user(started_cluster):
    with pytest.raises(Exception):
        node.stop_clickhouse()
        node.exec_in_container(["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], privileged=True)
        node.start_clickhouse(start_wait_sec=3)

    log = node.grep_in_log("Effective")
Example #17
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
server = cluster.add_instance('server',
                              user_configs=["configs/users.d/network.xml"])

clientA1 = cluster.add_instance('clientA1', hostname='clientA1.com')
clientA2 = cluster.add_instance('clientA2', hostname='clientA2.com')
clientA3 = cluster.add_instance('clientA3', hostname='clientA3.com')
clientB1 = cluster.add_instance('clientB1', hostname='clientB001.ru')
clientB2 = cluster.add_instance('clientB2', hostname='clientB002.ru')
clientB3 = cluster.add_instance('clientB3',
                                hostname='xxx.clientB003.rutracker.com')
clientC1 = cluster.add_instance('clientC1', hostname='clientC01.ru')
clientC2 = cluster.add_instance('clientC2', hostname='xxx.clientC02.ru')
clientC3 = cluster.add_instance('clientC3',
                                hostname='xxx.clientC03.rutracker.com')
clientD1 = cluster.add_instance('clientD1', hostname='clientD0001.ru')
clientD2 = cluster.add_instance('clientD2', hostname='xxx.clientD0002.ru')
clientD3 = cluster.add_instance('clientD3', hostname='clientD0003.ru')
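
# The users.d/network.xml above presumably restricts which client hosts may
# connect. A hedged sketch of what such a config can look like (the actual
# patterns live in the test's configs directory):
#
#   <yandex><users><default><networks>
#       <host>clientA1.com</host>
#       <host_regexp>^clientB\d+\.ru$</host_regexp>
#   </networks></default></users></yandex>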


def check_clickhouse_is_ok(client_node, server_node):
    assert client_node.exec_in_container([
        "bash", "-c", "/usr/bin/curl -s {}:8123 ".format(server_node.hostname)
    ]) == "Ok.\n"


def query_from_one_node_to_another(client_node, server_node, query):
    check_clickhouse_is_ok(client_node, server_node)
Example #18
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1',
                             main_configs=['configs/remote_servers.xml'])
node2 = cluster.add_instance('node2',
                             main_configs=['configs/remote_servers.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in (node1, node2):
            node.query('''
CREATE TABLE local_table(id UInt32, val String) ENGINE = TinyLog;
''')

        node1.query("INSERT INTO local_table VALUES (1, 'node1')")
        node2.query("INSERT INTO local_table VALUES (2, 'node2')")

        node1.query('''
CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table);
CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distributed_table')
''')
Example #19
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1')


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_attach_without_checksums(start_cluster):
    node1.query(
        "CREATE TABLE test (date Date, key Int32, value String) Engine=MergeTree ORDER BY key PARTITION by date"
    )

    node1.query(
        "INSERT INTO test SELECT toDate('2019-10-01'), number, toString(number) FROM numbers(100)"
    )

    assert node1.query(
        "SELECT COUNT() FROM test WHERE key % 10 == 0") == "10\n"

    node1.query("ALTER TABLE test DETACH PARTITION '2019-10-01'")
Example #20
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')


@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    try:
        cluster.start()

        instance.query("CREATE DATABASE test")
        instance.query(
            "CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()"
        )
        instance.query("INSERT INTO test.table VALUES (1,5), (2,10)")

        yield cluster

    finally:
        cluster.shutdown()


@pytest.fixture(autouse=True)
def cleanup_after_test():
    try:
        yield
    finally:
        instance.query("DROP USER IF EXISTS A, B")
Example #21
import pytest
import time
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    stay_alive=True,
    main_configs=[
        "configs/config.d/query_log.xml",
        "configs/config.d/schema_cache.xml",
    ],
)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def get_profile_event_for_query(node, query, profile_event):
    node.query("system flush logs")
    query = query.replace("'", "\\'")
    return int(
        node.query(
            f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by event_time desc limit 1"
        )
Example #22
import re
import time

import pytest
import requests
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=['configs/prom_conf.xml'])


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def parse_response_line(line):
    allowed_prefixes = [
        "ClickHouse",
        "# HELP",
        "# TYPE",
    ]
    assert any(line.startswith(prefix) for prefix in allowed_prefixes)

    if line.startswith("#"):
        return {}
    match = re.match(r'^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? (\d)', line)
Example #23
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

instance_with_dist_table = cluster.add_instance('instance_with_dist_table', main_configs=['configs/remote_servers.xml'])
replica1 = cluster.add_instance('replica1', with_zookeeper=True)
replica2 = cluster.add_instance('replica2', with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for replica in (replica1, replica2):
            replica.query(
                "CREATE TABLE replicated (d Date, x UInt32) ENGINE = "
                "ReplicatedMergeTree('/clickhouse/tables/replicated', '{instance}', d, d, 8192)")

        instance_with_dist_table.query(
            "CREATE TABLE distributed (d Date, x UInt32) ENGINE = "
            "Distributed('test_cluster', 'default', 'replicated')")

        yield cluster

    finally:
        cluster.shutdown()
Example #24
import os
import pytest
import subprocess
import time
import pymysql.connections

from docker.models.containers import Container

from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

config_dir = os.path.join(SCRIPT_DIR, './configs')
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    'node',
    config_dir=config_dir,
    env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'})

server_port = 9001


@pytest.fixture(scope="module")
def server_address():
    cluster.start()
    try:
        yield cluster.get_instance_ip('node')
    finally:
        cluster.shutdown()


@pytest.fixture(scope='module')
Example #25
import time
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster

    except Exception as ex:
        print(ex)

    finally:
        cluster.shutdown()

def drop_table(nodes, table_name):
    for node in nodes:
        node.query("DROP TABLE IF EXISTS {}".format(table_name))

def test_ttl_columns(start_cluster):
    drop_table([node1, node2], "test_ttl")
    for node in [node1, node2]:
        node.query(
Example #26
from kazoo.client import KazooClient, KazooState
from kazoo.security import ACL, make_digest_acl, make_acl
from kazoo.exceptions import (
    AuthFailedError,
    InvalidACLError,
    NoAuthError,
    KazooException,
)
import os
import time

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node = cluster.add_instance(
    "node",
    main_configs=["configs/keeper_config.xml", "configs/logs_conf.xml"],
    stay_alive=True,
)


def start_zookeeper():
    node.exec_in_container(
        ["bash", "-c", "/opt/zookeeper/bin/zkServer.sh start"])


def stop_zookeeper():
    node.exec_in_container(
        ["bash", "-c", "/opt/zookeeper/bin/zkServer.sh stop"])


def clear_zookeeper():
Example #27
import time
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", with_zookeeper=True)
node3 = cluster.add_instance('node3', config_dir="configs", with_zookeeper=True)
node4 = cluster.add_instance('node4', config_dir="configs", with_zookeeper=True)

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        for node, shard in [(node1, 1), (node2, 1), (node3, 2), (node4, 2)]:
            node.query(
            '''
                CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
                PARTITION BY date
                ORDER BY id
            '''.format(shard=shard, replica=node.name), settings={"password": "******"})

        yield cluster

    finally:
        cluster.shutdown()

def test_truncate(start_cluster):
    node1.query("insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", settings={"password": "******"})
Example #28
import logging
import os.path as p
import pytest
import uuid

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from string import Template

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance", main_configs=["configs/jdbc_bridge.xml"], with_jdbc_bridge=True)
datasource = "self"
records = 1000

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query('''
            CREATE DATABASE test;
            CREATE TABLE test.ClickHouseTable(Num UInt32, Str String, Desc Nullable(String)) engine = Memory;
            INSERT INTO test.ClickHouseTable(Num, Str)
            SELECT number, toString(number) FROM system.numbers LIMIT {};
        '''.format(records))

        while True:
            datasources = instance.query("select * from jdbc('', 'show datasources')")
            if 'self' in datasources:
                logging.debug(f"JDBC Driver self datasource initialized.\n{datasources}")
                break
            else:
Example #29
import os.path as p
import time
import datetime
import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', main_configs=['configs/graphite_rollup.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query('CREATE DATABASE test')

        yield cluster

    finally:
        cluster.shutdown()

@pytest.fixture
def graphite_table(started_cluster):
    instance.query('''
DROP TABLE IF EXISTS test.graphite;
CREATE TABLE test.graphite
    (metric String, value Float64, timestamp UInt32, date Date, updated UInt32)
    ENGINE = GraphiteMergeTree(date, (metric, timestamp), 8192, 'graphite_rollup');
''')
Example #30
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

from helpers.client import QueryRuntimeException

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    'node1',
    main_configs=["configs/config.d/clusters.xml"],
    user_configs=["configs/users.d/default_with_password.xml"],
    with_zookeeper=True)
node2 = cluster.add_instance(
    'node2',
    main_configs=["configs/config.d/clusters.xml"],
    user_configs=["configs/users.d/default_with_password.xml"],
    with_zookeeper=True)
node3 = cluster.add_instance(
    'node3',
    main_configs=["configs/config.d/clusters.xml"],
    user_configs=["configs/users.d/default_with_password.xml"],
    with_zookeeper=True)
node4 = cluster.add_instance(
    'node4',
    main_configs=["configs/config.d/clusters.xml"],
    user_configs=["configs/users.d/default_with_password.xml"],
    with_zookeeper=True)
node5 = cluster.add_instance(
    'node5',
    main_configs=["configs/config.d/clusters.xml"],
Example #31
import pytest
import time
import os, sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import helpers

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with Distributed table.
# Thus we have a shard with a local replica and a shard with remote replicas.
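# A remote_servers.xml matching that layout would look roughly like the sketch
# below (hedged; the real file ships with the test's configs):
#
#   <yandex><remote_servers><test_cluster>
#       <shard><internal_replication>true</internal_replication>
#           <replica><host>node_1_1</host><port>9000</port></replica>
#           <replica><host>node_1_2</host><port>9000</port></replica>
#       </shard>
#       <shard><internal_replication>true</internal_replication>
#           <replica><host>node_2_1</host><port>9000</port></replica>
#           <replica><host>node_2_2</host><port>9000</port></replica>
#       </shard>
#   </test_cluster></remote_servers></yandex>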
node_1_1 = instance_with_dist_table = cluster.add_instance(
    'node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml'])
node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True)
node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True)
node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for shard in (1, 2):
            for replica in (1, 2):
                node = cluster.instances['node_{}_{}'.format(shard, replica)]
                node.query('''
CREATE TABLE replicated (d Date, x UInt32) ENGINE =
    ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}', d, d, 8192)'''
Example #32
import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in [node1, node2]:
            node.query('''
            CREATE TABLE replicated_mt(date Date, id UInt32, value Int32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id;
                '''.format(replica=node.name))

        node1.query('''
            CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32)
            ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id;
        ''')

        yield cluster
Example #33
import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()


        for node in [node1, node2]:
            node.query('''
            CREATE TABLE sometable(date Date, id UInt32, value Int32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '{replica}', date, id, 8192);
                '''.format(replica=node.name), user='******')

        for node in [node3, node4]:
            node.query('''
            CREATE TABLE someothertable(date Date, id UInt32, value Int32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/someothertable', '{replica}', date, id, 8192);
                '''.format(replica=node.name), user='******')
Example #34
import pytest
import time
import psycopg2

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
                             main_configs=["configs/named_collections.xml"],
                             with_postgres=True)

postgres_table_template = """
    CREATE TABLE IF NOT EXISTS {} (
    id Integer NOT NULL, value Integer, PRIMARY KEY (id))
    """


def get_postgres_conn(cluster, database=False):
    if database:
        conn_string = f"host={cluster.postgres_ip} port={cluster.postgres_port} dbname='test_database' user='******' password='******'"
    else:
        conn_string = f"host={cluster.postgres_ip} port={cluster.postgres_port} user='******' password='******'"
    conn = psycopg2.connect(conn_string)
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    conn.autocommit = True
    return conn


def create_postgres_db(cursor, name):
Example #35
import time
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', user_configs=['configs/config_no_substs.xml']) # hardcoded value 33333
node2 = cluster.add_instance('node2', user_configs=['configs/config_env.xml'], env_variables={"MAX_QUERY_SIZE": "55555"})
node3 = cluster.add_instance('node3', user_configs=['configs/config_zk.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', user_configs=['configs/config_incl.xml'], main_configs=['configs/max_query_size.xml']) # include value 77777
node5 = cluster.add_instance('node5', user_configs=['configs/config_allow_databases.xml'])
node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from_env.xml'], env_variables={"INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/max_query_size.xml"}, main_configs=['configs/max_query_size.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        def create_zk_roots(zk):
            zk.create(path="/setting/max_query_size", value="77777", makepath=True)
        cluster.add_zookeeper_startup_command(create_zk_roots)

        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def test_config(start_cluster):
    assert node1.query("select value from system.settings where name = 'max_query_size'") == "33333\n"
    assert node2.query("select value from system.settings where name = 'max_query_size'") == "55555\n"
    assert node3.query("select value from system.settings where name = 'max_query_size'") == "77777\n"
    assert node4.query("select value from system.settings where name = 'max_query_size'") == "99999\n"
    assert node6.query("select value from system.settings where name = 'max_query_size'") == "99999\n"
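
# For reference on the substitution mechanism exercised above: node2's
# config_env.xml presumably pulls the value from the environment, along the
# lines of this sketch (the real file is in the test's configs directory):
#
#   <yandex><profiles><default>
#       <max_query_size from_env="MAX_QUERY_SIZE"/>
#   </default></profiles></yandex>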
Example #36
import time

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

instance_test_mutations = cluster.add_instance(
    'test_mutations_with_merge_tree',
    main_configs=['configs/config.xml'],
    user_configs=['configs/users.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance_test_mutations.query(
            '''CREATE TABLE test_mutations_with_ast_elements(date Date, a UInt64, b String) ENGINE = MergeTree(date, (a, date), 8192)'''
        )
        instance_test_mutations.query(
            '''INSERT INTO test_mutations_with_ast_elements SELECT '2019-07-29' AS date, 1, toString(number) FROM numbers(1) SETTINGS force_index_by_date = 0, force_primary_key = 0'''
        )
        yield cluster
    finally:
        cluster.shutdown()


def test_mutations_in_partition_background(started_cluster):
    try:
        numbers = 100
Example #37
import pytest
import time

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager


cluster = ClickHouseCluster(__file__)

instance_with_dist_table = cluster.add_instance('instance_with_dist_table', main_configs=['configs/remote_servers.xml'])
remote = cluster.add_instance('remote')

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local (x UInt32) ENGINE = Log")

        instance_with_dist_table.query('''
CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local')
''')

        yield cluster

    finally:
        cluster.shutdown()


def test_reconnect(started_cluster):
    with PartitionManager() as pm:
Example #38
import pytest
from helpers.cluster import ClickHouseCluster
import random
import string
import os
import time
from multiprocessing.dummy import Pool
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
from kazoo.client import KazooClient, KazooState

cluster = ClickHouseCluster(__file__)
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")

node1 = cluster.add_instance(
    "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True
)
node2 = cluster.add_instance("node2", main_configs=[], stay_alive=True)
node3 = cluster.add_instance("node3", main_configs=[], stay_alive=True)


def get_fake_zk(node, timeout=30.0):
    _fake_zk_instance = KazooClient(
        hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout
    )
    _fake_zk_instance.start()
    return _fake_zk_instance


@pytest.fixture(scope="module")
def started_cluster():
Example #40
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
from multiprocessing.dummy import Pool
from kazoo.client import KazooClient, KazooState
import random
import string
import os
import time

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1",
                             main_configs=["configs/keeper_config1.xml"],
                             stay_alive=True)
node2 = cluster.add_instance("node2",
                             main_configs=["configs/keeper_config2.xml"],
                             stay_alive=True)
node3 = cluster.add_instance("node3",
                             main_configs=["configs/keeper_config3.xml"],
                             stay_alive=True)


def start_zookeeper(node):
    node.exec_in_container(
        ["bash", "-c", "/opt/zookeeper/bin/zkServer.sh start"])


def stop_zookeeper(node):
    node.exec_in_container(
        ["bash", "-c", "/opt/zookeeper/bin/zkServer.sh stop"])
Example #41
import time
import pytest
import string
import random

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'])
node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_preconfigured_default_codec(start_cluster):
    for node in [node1, node2]:
        node.query("""
        CREATE TABLE compression_codec_multiple_with_key (
            somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
            id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
            data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4),
            somecolumn Float64
Example #42
import os
import subprocess
import time

import docker
import pymysql.connections
import pytest
from docker.models.containers import Container
from helpers.cluster import ClickHouseCluster, get_docker_compose_path

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DOCKER_COMPOSE_PATH = get_docker_compose_path()

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/log_conf.xml", "configs/ssl_conf.xml", "configs/mysql.xml",
                                                  "configs/dhparam.pem", "configs/server.crt", "configs/server.key"],
                            user_configs=["configs/users.xml"], env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'})

server_port = 9001


@pytest.fixture(scope="module")
def server_address():
    cluster.start()
    try:
        yield cluster.get_instance_ip('node')
    finally:
        cluster.shutdown()


@pytest.fixture(scope='module')
Example #43
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                config_dir="configs")


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_read_only_constraint(started_cluster):
    # Change a setting for session with SET.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"

    expected_error = "Setting force_index_by_date should not be changed"
    assert expected_error in instance.query_and_get_error("SET force_index_by_date=1")

    # Change a setting for query with SETTINGS.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"
Example #44
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__, name="aggregate_fixed_key")
node1 = cluster.add_instance(
    "node1",
    with_zookeeper=True,
    image="yandex/clickhouse-server",
    tag="21.3",
    with_installed_binary=True,
)
node2 = cluster.add_instance("node2", with_zookeeper=True)
node3 = cluster.add_instance("node3", with_zookeeper=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_two_level_merge(start_cluster):
    for node in start_cluster.instances.values():
        node.query("""
            CREATE TABLE IF NOT EXISTS test_two_level_merge(date Date, zone UInt32, number UInt32)
            ENGINE = MergeTree() PARTITION BY toUInt64(number / 1000) ORDER BY tuple();
Example #45
import os

import pymysql.cursors
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

cluster = ClickHouseCluster(__file__,
                            base_configs_dir=os.path.join(
                                SCRIPT_DIR, 'configs'))
node1 = cluster.add_instance(
    'node1',
    with_odbc_drivers=True,
    with_mysql=True,
    image='yandex/clickhouse-integration-test',
    main_configs=[
        'configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml',
        'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml',
        'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'
    ],
    stay_alive=True)

create_table_sql_template = """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int  NOT NULL default 0,
    `money` int NOT NULL default 0,
    `column_x` int default NULL,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """
Example #46
import pytest
import time
import os
from contextlib import contextmanager

from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseKiller
from helpers.network import PartitionManager
from helpers.network import PartitionManagerDisabler

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__,
                            base_configs_dir=os.path.join(
                                SCRIPT_DIR, 'configs'))

dictionary_node = cluster.add_instance('dictionary_node', stay_alive=True)
main_node = cluster.add_instance(
    'main_node',
    main_configs=['configs/dictionaries/cache_ints_dictionary.xml'])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        dictionary_node.query("create database if not exists test;")
        dictionary_node.query("drop table if exists test.ints;")
        dictionary_node.query("create table test.ints "
                              "(key UInt64, "
                              "i8 Int8,  i16 Int16,  i32 Int32,  i64 Int64, "
                              "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) "
Example #47
from contextlib import contextmanager

import pytest

## sudo -H pip install PyMySQL
import pymysql.cursors

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True)
create_table_sql_template = """
    CREATE TABLE `clickhouse`.`{}` (
    `id` int(11) NOT NULL,
    `name` varchar(50) NOT NULL,
    `age` int  NOT NULL default 0,
    `money` int NOT NULL default 0,
    PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        conn = get_mysql_conn()
        ## create mysql db and table
        create_mysql_db(conn, 'clickhouse')
        yield cluster
Example #48
import os
import sys
import time

import pytest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', stay_alive=True, main_configs=[])


# Something like https://reviews.llvm.org/D33325
def skip_test_msan(instance):
    if instance.is_built_with_memory_sanitizer():
        pytest.skip("Memory Sanitizer cannot work with vfork")


def copy_file_to_container(local_path, dist_path, container_id):
    os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path,
                                                          cont_id=container_id,
                                                          dist=dist_path))


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
Example #49
import time
import pytest
import requests
from tempfile import NamedTemporaryFile
from helpers.hdfs_api import HDFSApi

import os

from helpers.cluster import ClickHouseCluster
import subprocess


SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_hdfs=True, config_dir="configs", main_configs=['configs/log_conf.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        yield cluster

    except Exception as ex:
        print(ex)
        raise
    finally:
        cluster.shutdown()

def test_read_write_storage(started_cluster):
Example #50
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance",
                                clickhouse_path_dir="clickhouse_path")


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query("CREATE DATABASE test")
        yield cluster

    finally:
        cluster.shutdown()


def create_simple_table():
    instance.query("DROP TABLE IF EXISTS test.simple")
    instance.query("""
        CREATE TABLE test.simple (key UInt64, value String)
            ENGINE = MergeTree ORDER BY tuple();
        """)


def test_protobuf_format_input(started_cluster):
    create_simple_table()
    instance.http_query(
        "INSERT INTO test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'",
Example #51
import pytest

from helpers.cluster import ClickHouseCluster


def _fill_nodes(nodes, shard, connections_count):
    for node in nodes:
        node.query(
        '''
            CREATE DATABASE test;

            CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
            PARTITION BY date
            ORDER BY id
            SETTINGS
                replicated_max_parallel_fetches_for_host={connections},
                index_granularity=8192;
        '''.format(shard=shard, replica=node.name, connections=connections_count))

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def start_small_cluster():
    try:
        cluster.start()

        _fill_nodes([node1, node2], 1, 1)

        yield cluster

    finally:
        cluster.shutdown()

def test_single_endpoint_connections_count(start_small_cluster):
Example #52
import os
import warnings
import time

import pymysql
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    'node1',
    with_mysql=True,
    dictionaries=['configs/dictionaries/simple_dictionary.xml'],
    user_configs=['configs/user_admin.xml', 'configs/user_default.xml'])
node2 = cluster.add_instance(
    'node2',
    with_mysql=True,
    dictionaries=['configs/dictionaries/simple_dictionary.xml'],
    main_configs=[
        'configs/dictionaries/lazy_load.xml', 'configs/allow_remote_node.xml'
    ],
    user_configs=['configs/user_admin.xml', 'configs/user_default.xml'])
node3 = cluster.add_instance(
    'node3',
    main_configs=['configs/allow_remote_node.xml'],
    dictionaries=[
        'configs/dictionaries/dictionary_with_conflict_name.xml',
        'configs/dictionaries/conflict_name_dictionary.xml'
Example #53
import time
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV


cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        node_to_shards = [
            (node1, [0, 2]),
            (node2, [0, 1]),
            (node3, [1, 2]),
        ]

        for node, shards in node_to_shards:
            for shard in shards:
                node.query('''
CREATE DATABASE shard_{shard};
Example #54
#  [0.offset_in_compressed_file, 189.offset_in_compressed_file] = [0, 2003111].
#  But this range is incorrect, because the dictionary actually ends at offset
#  2081424, and all marks from 186 to 191 share this same dictionary. If we try
#  to read data from [0, 2003111], we cannot do it, because it is impossible to
#  read the dictionary.
#
#  So this buggy logic was fixed, and this test confirms the fix. At first I
#  tried to pick sane numbers of rows, but the error did not reproduce; after
#  three tries with almost random row counts, it did.
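#
#  A toy sketch of the arithmetic described above (a hypothetical helper, not
#  ClickHouse internals): the right border of a read range must not cut the
#  dictionary shared by the trailing marks in half.
#
#      def right_border(last_mark_offset, dictionary_end):
#          # The buggy logic stopped at last_mark_offset; the fix extends
#          # the range to the end of the shared dictionary.
#          return max(last_mark_offset, dictionary_end)
#
#      right_border(2003111, 2081424)  # -> 2081424, not 2003111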


import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/s3.xml"], with_minio=True)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_s3_right_border(started_cluster):
    node1.query("""
CREATE TABLE s3_low_cardinality
(
Example #55
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'])

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in (node1, node2):
            node.query('''
CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id;
CREATE TABLE local_table_2(id UInt32, val String) ENGINE = MergeTree ORDER BY id;
''')

        node1.query("INSERT INTO local_table VALUES (1, 'node1')")
        node2.query("INSERT INTO local_table VALUES (2, 'node2')")

        node1.query('''
CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table);
CREATE TABLE distributed_table_2(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table_2);
CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distributed_table')
''')
Example #56
import time

import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)

instance_test_reconnect = cluster.add_instance(
    "instance_test_reconnect", main_configs=["configs/remote_servers.xml"]
)
instance_test_inserts_batching = cluster.add_instance(
    "instance_test_inserts_batching",
    main_configs=["configs/remote_servers.xml"],
    user_configs=["configs/enable_distributed_inserts_batching.xml"],
)
remote = cluster.add_instance(
    "remote", main_configs=["configs/forbid_background_merges.xml"]
)

instance_test_inserts_local_cluster = cluster.add_instance(
    "instance_test_inserts_local_cluster", main_configs=["configs/remote_servers.xml"]
)

node1 = cluster.add_instance(
    "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node2 = cluster.add_instance(
    "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
Example #57
import time
import pytest
import string
import random

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'])
node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'])
node4 = cluster.add_instance('node4', user_configs=['configs/enable_uncompressed_cache.xml'])
node5 = cluster.add_instance('node5', main_configs=['configs/zstd_compression_by_default.xml'], user_configs=['configs/enable_uncompressed_cache.xml'])

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_preconfigured_default_codec(start_cluster):
    for node in [node1, node2]:
        node.query("""
        CREATE TABLE compression_codec_multiple_with_key (
            somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
            id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
Example #58
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                clickhouse_path_dir='clickhouse_path')


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        instance.query('CREATE DATABASE test')
        yield cluster

    finally:
        cluster.shutdown()


def create_simple_table():
    instance.query("DROP TABLE IF EXISTS test.simple")
    instance.query('''
        CREATE TABLE test.simple (key UInt64, value String)
            ENGINE = MergeTree ORDER BY tuple();
        ''')


def test_protobuf_format_input(started_cluster):
    create_simple_table()
    instance.http_query(
        "INSERT INTO test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'",
Example #59
import time

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)

instance_test_reconnect = cluster.add_instance(
    "instance_test_reconnect", main_configs=["configs/remote_servers.xml"])
instance_test_inserts_batching = cluster.add_instance(
    "instance_test_inserts_batching",
    main_configs=["configs/remote_servers.xml"],
    user_configs=["configs/enable_distributed_inserts_batching.xml"],
)
remote = cluster.add_instance(
    "remote", main_configs=["configs/forbid_background_merges.xml"])

instance_test_inserts_local_cluster = cluster.add_instance(
    "instance_test_inserts_local_cluster",
    main_configs=["configs/remote_servers.xml"])


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log")
Example #60
import uuid

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1',
                             main_configs=[
                                 'configs/remote_servers.xml',
                                 'configs/merge_tree_uuids.xml'
                             ],
                             with_zookeeper=True)

node2 = cluster.add_instance('node2',
                             main_configs=[
                                 'configs/remote_servers.xml',
                                 'configs/merge_tree_uuids.xml',
                                 'configs/merge_tree_in_memory.xml'
                             ],
                             with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()