Example #1
def test_s3_disk_apply_new_settings(cluster, node_name):
    node = cluster.instances[node_name]
    create_table(node, "s3_test")

    def get_s3_requests():
        node.query("SYSTEM FLUSH LOGS")
        return int(
            node.query(
                "SELECT value FROM system.events WHERE event='S3WriteRequestsCount'"
            )
        )

    s3_requests_before = get_s3_requests()
    node.query("INSERT INTO s3_test VALUES {}".format(
        generate_values("2020-01-03", 4096)))
    s3_requests_to_write_partition = get_s3_requests() - s3_requests_before

    # Force multi-part upload mode.
    replace_config(
        CONFIG_PATH,
        "<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>",
        "<s3_max_single_part_upload_size>0</s3_max_single_part_upload_size>",
    )

    node.query("SYSTEM RELOAD CONFIG")

    s3_requests_before = get_s3_requests()
    node.query("INSERT INTO s3_test VALUES {}".format(
        generate_values("2020-01-04", 4096, -1)))

    # There should be 3 times more S3 requests because multi-part upload mode
    # uses 3 requests to upload an object.
    assert (
        get_s3_requests() - s3_requests_before
        == s3_requests_to_write_partition * 3
    )
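The replace_config helper used above is defined elsewhere in the test suite and not shown in this listing. Judging by the call sites (a config path, the XML fragment to find, and the fragment to substitute), a minimal sketch could look like this; the signature and the in-place rewrite are assumptions, not the suite's actual implementation.

from pathlib import Path

def replace_config(config_path, old, new):
    # Hypothetical sketch: substitute one XML fragment for another
    # in the config file, in place. The suite's real helper may differ.
    path = Path(config_path)
    path.write_text(path.read_text().replace(old, new))

After this rewrite, SYSTEM RELOAD CONFIG makes the server pick up the lowered s3_max_single_part_upload_size, which is what pushes the subsequent INSERT into multi-part upload mode.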
Example #2
def test_apply_new_settings(cluster):
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)
    config_path = os.path.join(
        SCRIPT_DIR,
        "./{}/node/configs/config.d/storage_conf.xml".format(
            cluster.instances_dir_name),
    )

    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}"
    )

    # Force multi-part upload mode.
    replace_config(
        config_path,
        "<max_single_part_upload_size>33554432</max_single_part_upload_size>",
        "<max_single_part_upload_size>4096</max_single_part_upload_size>",
    )

    node.query("SYSTEM RELOAD CONFIG")
    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096, -1)}",
    )
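generate_values, also defined outside this listing, builds the VALUES payload for these INSERTs. The optional third argument (-1 in the second INSERT) behaves like a sign multiplier on the ids, which is what lets the sum(id) == "(0)" assertion in the next example hold after a paired positive/negative insert. A hypothetical sketch:

import random
import string

def generate_values(date_str, count, sign=1):
    # Hypothetical sketch: one (date, id, payload) row per value,
    # with ids sign-flipped so paired inserts cancel to a zero sum.
    rows = [
        (date_str, sign * (i + 1),
         "".join(random.choices(string.ascii_lowercase, k=10)))
        for i in range(count)
    ]
    return ",".join("('{}',{},'{}')".format(d, i, s) for d, i, s in rows)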
Example #3
def test_restart_during_load(cluster):
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)
    config_path = os.path.join(
        SCRIPT_DIR,
        "./{}/node/configs/config.d/storage_conf.xml".format(
            cluster.instances_dir_name),
    )

    # Remove the container_already_exists setting from the storage configuration.
    replace_config(
        config_path,
        "<container_already_exists>false</container_already_exists>", "")

    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}"
    )
    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 4096, -1)}",
    )

    def read():
        for ii in range(0, 5):
            logging.info(f"Executing {ii} query")
            assert (azure_query(
                node,
                f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)")
            logging.info(f"Query {ii} executed")
            time.sleep(0.2)

    def restart_disk():
        for iii in range(0, 2):
            logging.info(f"Restarting disk, attempt {iii}")
            node.query(f"SYSTEM RESTART DISK {AZURE_BLOB_STORAGE_DISK}")
            logging.info(f"Disk restarted, attempt {iii}")
            time.sleep(0.5)

    threads = []
    for _ in range(0, 4):
        threads.append(SafeThread(target=read))

    threads.append(SafeThread(target=restart_disk))

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()
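SafeThread is not a standard-library class. In test suites like this one it is typically a threading.Thread subclass that captures an exception raised by the target and re-raises it on join(), so that an assertion failure inside read() actually fails the test instead of dying silently in a worker thread. A sketch under that assumption:

import threading

class SafeThread(threading.Thread):
    # Hypothetical sketch: surface worker-thread exceptions in the
    # main test thread by re-raising them from join().
    def __init__(self, target):
        super().__init__(target=target)
        self.exception = None

    def run(self):
        try:
            super().run()
        except Exception as e:
            self.exception = e

    def join(self, timeout=None):
        super().join(timeout)
        if self.exception is not None:
            raise self.exception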
Example #4
File: test.py  Project: zzsmdfj/ClickHouse
def test_apply_new_settings(cluster):
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)

    azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}")

    # Force multi-part upload mode.
    replace_config(
        CONFIG_PATH,
        "<max_single_part_upload_size>33554432</max_single_part_upload_size>",
        "<max_single_part_upload_size>4096</max_single_part_upload_size>")

    node.query("SYSTEM RELOAD CONFIG")
    azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096, -1)}")
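azure_query wraps node.query for the Azure-backed tests. Object-storage emulators are flaky, so wrappers like this usually retry a query a few times before giving up; the retry count and the blanket exception handling below are assumptions for illustration, not the suite's actual implementation:

import logging

def azure_query(node, query, try_num=3):
    # Hypothetical sketch: retry transient storage-side failures,
    # re-raising on the final attempt.
    for attempt in range(try_num):
        try:
            return node.query(query)
        except Exception:
            logging.exception("Attempt %d failed for query: %s", attempt, query)
            if attempt == try_num - 1:
                raise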
Example #5
File: test.py  Project: nkolotov/ClickHouse
def test_restart_during_load(cluster):
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)

    # Remove the container_already_exists setting from the storage configuration.
    replace_config(
        CONFIG_PATH,
        "<container_already_exists>false</container_already_exists>", "")

    node.query(
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}"
    )
    node.query(
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 4096, -1)}"
    )

    def read():
        for ii in range(0, 5):
            logging.info(f"Executing {ii} query")
            assert node.query(
                f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)"
            logging.info(f"Query {ii} executed")
            time.sleep(0.2)

    def restart_disk():
        for iii in range(0, 2):
            logging.info(f"Restarting disk, attempt {iii}")
            node.query(f"SYSTEM RESTART DISK {BLOB_STORAGE_DISK}")
            logging.info(f"Disk restarted, attempt {iii}")
            time.sleep(0.5)

    threads = []
    for _ in range(0, 4):
        threads.append(SafeThread(target=read))

    threads.append(SafeThread(target=restart_disk))

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()
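Every example begins with create_table(node, TABLE_NAME), another helper that lives outside this listing. Given that the tests insert (date, id, payload)-shaped rows onto an object-storage disk, a plausible sketch is a MergeTree table pinned to the storage policy under test; the column names and the policy name here are assumptions:

def create_table(node, table_name):
    # Hypothetical sketch: MergeTree table on the object-storage policy
    # under test; columns and policy name are assumptions.
    node.query(
        f"""
        CREATE TABLE {table_name} (
            dt Date,
            id Int64,
            data String
        ) ENGINE = MergeTree()
        ORDER BY (dt, id)
        SETTINGS storage_policy = 'blob_storage_policy'
        """
    )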