Example #1
0
def test_deduplication_works_in_case_of_intensive_inserts(started_cluster):
    """Hammer the replicated table with identical INSERTs from every node
    while parallel shell checkers assert SELECT count() stays at exactly 1,
    i.e. that insert deduplication holds up under concurrent load."""
    # Shell script run per node: re-insert the same row 1000 times.
    # Placeholders: client command, host.
    insert_script = """
set -e
for i in `seq 1000`; do
    {} --host {} -q "INSERT INTO simple VALUES (0, 0)"
done
"""
    # Shell script run per node: keep verifying the row count is still 1.
    # Placeholders: client command, host, node name (for the error message).
    check_script = """
set -e
for i in `seq 1000`; do
    res=`{} --host {} -q "SELECT count() FROM simple"`
    if [[ $? -ne 0 || $res -ne 1 ]]; then
        echo "Selected $res elements! Host: {}" 1>&2
        exit -1
    fi;
done
"""

    node1.query("""
        CREATE TABLE simple ON CLUSTER test_cluster (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id"""
                )

    # Seed the single row that every duplicate INSERT below must collapse into.
    node1.query("INSERT INTO simple VALUES (0, 0)")

    inserters = []
    fetchers = []
    for replica in nodes:
        address = replica.ip_address

        inserters.append(
            CommandRequest(
                ["/bin/bash"],
                timeout=10,
                stdin=insert_script.format(cluster.get_client_cmd(), address),
            ))

        fetchers.append(
            CommandRequest(
                ["/bin/bash"],
                timeout=10,
                stdin=check_script.format(cluster.get_client_cmd(), address, replica.name),
            ))

    # Drain the INSERT side: hitting the 10s timeout is the one acceptable
    # outcome; any other exception propagates and fails the test.
    for request in inserters:
        try:
            request.get_answer()
        except QueryTimeoutExceedException:
            # Only timeout is accepted
            pass

    # Same policy for the SELECT checkers.
    for request in fetchers:
        try:
            request.get_answer()
        except QueryTimeoutExceedException:
            # Only timeout is accepted
            pass

    node1.query("""DROP TABLE simple ON CLUSTER test_cluster""")
Example #2
0
def test_random_inserts(started_cluster):
    """Concurrently insert one row per second-granularity timestamp from every
    node (via the external test.sh helper) and check that each replica ends up
    with exactly one row per timestamp in [min_timestamp, max_timestamp]."""
    # Duration of the test; reduce it if you don't want to wait.
    DURATION_SECONDS = 10# * 60

    node1.query("""
        CREATE TABLE simple ON CLUSTER test_cluster (date Date, i UInt32, s String)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, i, 8192)""")

    with PartitionManager() as pm_random_drops:
        # Deliberately a no-op: random ZooKeeper packet drops are disabled
        # because the test does not yet survive partition problems.
        for sacrifice in nodes:
            pass # This test doesn't work with partition problems still
            #pm_random_drops._add_rule({'probability': 0.01, 'destination': sacrifice.ip_address, 'source_port': 2181, 'action': 'REJECT --reject-with tcp-reset'})
            #pm_random_drops._add_rule({'probability': 0.01, 'source': sacrifice.ip_address, 'destination_port': 2181, 'action': 'REJECT --reject-with tcp-reset'})

        # Inclusive timestamp range the inserter scripts will cover.
        min_timestamp = int(time.time())
        max_timestamp = min_timestamp + DURATION_SECONDS
        num_timestamps = max_timestamp - min_timestamp + 1

        # One inserter process per node, all writing over the same timestamp
        # range, so cross-node duplicates are expected and must be deduplicated.
        bash_script = os.path.join(os.path.dirname(__file__), "test.sh")
        inserters = []
        for node in nodes:
            cmd = ['/bin/bash', bash_script, node.ip_address, str(min_timestamp), str(max_timestamp)]
            inserters.append(CommandRequest(cmd, timeout=DURATION_SECONDS * 2, stdin=''))
            print node.name, node.ip_address

        # Wait for all inserter scripts to finish (get_answer presumably
        # raises on script failure — confirm against CommandRequest).
        for inserter in inserters:
            inserter.get_answer()

    # Expected TSV row: count() == uniqExact(i) == num_timestamps, with i
    # spanning exactly [min_timestamp, max_timestamp].
    answer="{}\t{}\t{}\t{}\n".format(num_timestamps, num_timestamps, min_timestamp, max_timestamp)

    for node in nodes:
        res = node.query("SELECT count(), uniqExact(i), min(i), max(i) FROM simple")
        assert TSV(res) == TSV(answer), node.name + " : " + node.query("SELECT groupArray(_part), i, count() AS c FROM simple GROUP BY i ORDER BY c DESC LIMIT 1")

    node1.query("""DROP TABLE simple ON CLUSTER test_cluster""")
Example #3
0
def test_deduplication_works_in_case_of_intensive_inserts(started_cluster):
    """Spawn, per node, a shell loop of 1000 identical INSERTs plus a parallel
    shell loop asserting SELECT count() stays at 1, to verify that insert
    deduplication holds up under concurrent load.

    Assumes the replicated table `simple` already exists with one (0, 0) row
    (set up elsewhere in this test module)."""
    inserters = []
    fetchers = []

    for node in nodes:
        host = node.ip_address

        # Background shell that re-inserts the same row 1000 times; every
        # insert after the first must be deduplicated away.
        inserters.append(
            CommandRequest(['/bin/bash'],
                           timeout=10,
                           stdin="""
set -e
for i in `seq 1000`; do
    clickhouse-client --host {} -q "INSERT INTO simple VALUES (0, 0)"
done
""".format(host)))

        # Background shell that keeps verifying the table still holds exactly
        # one row while the inserters run.
        fetchers.append(
            CommandRequest(['/bin/bash'],
                           timeout=10,
                           stdin="""
set -e
for i in `seq 1000`; do
    res=`clickhouse-client --host {} -q "SELECT count() FROM simple"`
    if [[ $res -ne 1 ]]; then
        echo "Selected $res elements! Host: {}" 1>&2
        exit -1
    fi;
done
""".format(host, node.name)))

    # There were no errors during INSERTs: only a timeout is acceptable.
    for inserter in inserters:
        try:
            inserter.get_answer()
        except Exception as e:
            check_timeout_exception(e)

    # There were no errors during SELECTs: only a timeout is acceptable.
    # Fix: previously any exception was silently swallowed here (bare
    # `pass`), letting real SELECT failures go unnoticed; vet the exception
    # the same way as the INSERT side.
    for fetcher in fetchers:
        try:
            fetcher.get_answer()
        except Exception as e:
            check_timeout_exception(e)