def test_s3_disk_restart_during_load(cluster, node_name):
    """Exercise SYSTEM RESTART DISK s3 while concurrent readers query the table.

    Inserts two large batches whose ids cancel out (the second uses step -1),
    then runs 4 reader threads asserting sum(id) == 0 interleaved with 4
    threads repeatedly restarting the S3 disk. All threads are joined so that
    SafeThread can surface any exception raised inside a worker.
    """
    node = cluster.instances[node_name]
    create_table(node, "s3_test")
    # The second insert mirrors the first with step -1, so sum(id) stays 0
    # regardless of when a read lands relative to a disk restart.
    node.query(
        "INSERT INTO s3_test VALUES {}".format(
            generate_values("2020-01-04", 1024 * 1024)
        )
    )
    node.query(
        "INSERT INTO s3_test VALUES {}".format(
            generate_values("2020-01-05", 1024 * 1024, -1)
        )
    )

    def read():
        # Reader worker: the invariant sum(id) == 0 must hold even while the
        # disk is being restarted concurrently.
        for i in range(20):
            logging.info("Executing %d query", i)
            assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
            logging.info("Query %d executed", i)
            time.sleep(0.2)

    def restart_disk():
        # Restart worker: cycles the S3 disk a few times with pauses so reads
        # overlap both the restart window and steady state.
        for attempt in range(5):
            logging.info("Restarting disk, attempt %d", attempt)
            node.query("SYSTEM RESTART DISK s3")
            logging.info("Disk restarted, attempt %d", attempt)
            time.sleep(0.5)

    threads = []
    # Loop counter is unused — name it `_` for consistency with the sibling
    # restart tests in this file.
    for _ in range(4):
        threads.append(SafeThread(target=read))
        threads.append(SafeThread(target=restart_disk))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
def test_restart_during_load(cluster):
    """Exercise SYSTEM RESTART DISK on the Azure blob-storage disk under load.

    Strips <container_already_exists> from the node's storage config to force
    multi-part upload mode, inserts two batches whose ids cancel out, then
    runs 4 reader threads asserting sum(id) == 0 concurrently with 4 threads
    restarting the disk. Joining the SafeThreads surfaces worker failures.
    """
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)
    config_path = os.path.join(
        SCRIPT_DIR,
        "./{}/node/configs/config.d/storage_conf.xml".format(
            cluster.instances_dir_name
        ),
    )
    # Force multi-part upload mode.
    replace_config(
        config_path, "<container_already_exists>false</container_already_exists>", ""
    )
    # The second insert mirrors the first with step -1, so sum(id) stays 0.
    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}",
    )
    azure_query(
        node,
        f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 4096, -1)}",
    )

    def read():
        for i in range(5):
            # Lazy %-args defer formatting until the record is actually
            # emitted (and match the logging style of the S3 variant).
            logging.info("Executing %d query", i)
            assert (
                azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values")
                == "(0)"
            )
            logging.info("Query %d executed", i)
            time.sleep(0.2)

    def restart_disk():
        for attempt in range(2):
            logging.info("Restarting disk, attempt %d", attempt)
            node.query(f"SYSTEM RESTART DISK {AZURE_BLOB_STORAGE_DISK}")
            logging.info("Disk restarted, attempt %d", attempt)
            time.sleep(0.5)

    threads = []
    for _ in range(4):
        threads.append(SafeThread(target=read))
        threads.append(SafeThread(target=restart_disk))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
def test_restart_during_load(cluster):
    """Restart the blob-storage disk while reader threads query the table.

    Two mirrored inserts (the second generated with step -1) keep sum(id) at
    zero, which the reader workers assert while restart workers cycle the
    disk; every SafeThread is joined so worker exceptions propagate.
    """
    node = cluster.instances[NODE_NAME]
    create_table(node, TABLE_NAME)

    # Force multi-part upload mode.
    replace_config(
        CONFIG_PATH, "<container_already_exists>false</container_already_exists>", ""
    )

    # Load two cancelling batches so the read invariant below is sum(id) == 0.
    for values in (
        generate_values('2020-01-04', 4096),
        generate_values('2020-01-05', 4096, -1),
    ):
        node.query(f"INSERT INTO {TABLE_NAME} VALUES {values}")

    def read():
        ii = 0
        while ii < 5:
            logging.info(f"Executing {ii} query")
            result = node.query(f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values")
            assert result == "(0)"
            logging.info(f"Query {ii} executed")
            time.sleep(0.2)
            ii += 1

    def restart_disk():
        iii = 0
        while iii < 2:
            logging.info(f"Restarting disk, attempt {iii}")
            node.query(f"SYSTEM RESTART DISK {BLOB_STORAGE_DISK}")
            logging.info(f"Disk restarted, attempt {iii}")
            time.sleep(0.5)
            iii += 1

    # Four reader/restarter pairs, interleaved in the same order as appending
    # one of each per iteration.
    workers = [SafeThread(target=fn) for _ in range(4) for fn in (read, restart_disk)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()