def test_tps_multithreaded_async():
    # test throughput throttling with simple objects and multiple threads
    lib.run_benchmark("--workload RU,0.0001 --duration 5 --start-key 0 " +
                      "--keys 1000000000 -o I --throughput 1000 -z 16 --async")
    n_records = len(lib.scan_records())
    # there is much higher variance with multiple threads
    assert(4950 <= n_records <= 5050)
def test_tps_read_write_high_variance_async():
    # test throughput throttling writing complex objects and reading simple ones
    lib.run_benchmark("--workload RU,50 --duration 5 --start-key 0 " +
                      "--keys 1000000000 -o \"I,{500*S64:[10*I,B128]}\" --throughput 500 " +
                      "-z 1 --read-bins 1 --async")
    n_records = len(lib.scan_records())
    assert(1150 <= n_records <= 1350)
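# A sanity check of the arithmetic behind the throughput-throttling bounds
# used here and in test_tps_simple / test_tps_read_write_async: a throttled
# run should complete roughly throughput * duration transactions, and only
# the write fraction of those creates records. This helper is illustrative
# only (it is not part of lib), and the default tolerance is an assumption.
def _expected_record_range(throughput, duration, read_pct, tolerance=0.05):
    # total transactions attempted over the run
    total_txns = throughput * duration
    # the RU,<read_pct> workload writes on the remaining (100 - read_pct)%
    expected = total_txns * (100 - read_pct) / 100
    return (expected * (1 - tolerance), expected * (1 + tolerance))

# e.g. "--workload RU,50 --duration 5 --throughput 500" targets
# 500 * 5 * 0.5 = 1250 written records, consistent with the 1150-1350
# bounds asserted above.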
def test_linear_delete():
    # first fill up the database
    lib.run_benchmark("--workload I --start-key 0 --keys 100")
    lib.check_for_range(0, 100)
    # then delete it all (without resetting first, so the records written
    # above are still there to be deleted)
    lib.run_benchmark("--workload DB --start-key 0 --keys 100",
                      do_reset=False)
    lib.check_for_range(0, 0)
def test_write_bins_simple_async():
    def check_bins(b):
        assert(len(b) == 2)
        lib.obj_spec_is_I1(b["testbin"])
        lib.obj_spec_is_I3(b["testbin_3"])
    lib.run_benchmark("--workload I --start-key 0 --keys 100 -o I1,I2,I3,I4 " +
                      "--random --write-bins 1,3 --async")
    lib.check_for_range(0, 100, lambda meta, key, bins: check_bins(bins))
def test_random_udf():
    lib.upload_udf("test_module.lua", udf_module)
    # initialize all records to have just one bin "testbin" with value 1
    lib.run_benchmark("--workload I --start-key 0 --keys 20 -o 1")
    # the UDF should eventually reach all 20 records, incrementing "testbin" to 2
    lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
                      "-ufn increment_bin_to_2 -ufv \\\"testbin\\\" --start-key 0 --keys 20 --random",
                      do_reset=False)
    lib.check_for_range(0, 20,
            lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 2))
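# test_module.lua itself is not shown here; this is a pure-Python sketch of
# what increment_bin_to_2 is assumed to do to a record's bins, inferred from
# the assertions in the UDF tests rather than from the Lua source.
def _increment_bin_to_2_sketch(bins, bin_name="testbin"):
    # assumed behavior: bump the named bin from 1 to 2, and leave it at 2 on
    # repeat visits, so random re-application is idempotent
    if bins.get(bin_name) == 1:
        bins[bin_name] = 2
    return bins

# after enough random passes over keys 0-19, every record's "testbin" should
# read 2, which is exactly what obj_spec_is_const_I(bins["testbin"], 2) checks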
def test_linear_delete_subset_async():
    # first fill up the database
    lib.run_benchmark("--workload I --start-key 0 --keys 1000")
    lib.check_for_range(0, 1000)
    # then delete a subset of the database
    lib.run_benchmark("--workload DB --start-key 300 --keys 500 --async",
                      do_reset=False)
    lib.check_recs_exist_in_range(0, 300)
    lib.check_recs_exist_in_range(800, 1000)
    assert(len(lib.scan_records()) == 500)
def test_random_udf_subrange():
    lib.upload_udf("test_module.lua", udf_module)
    # initialize all records to have just one bin "testbin" with value 1
    lib.run_benchmark("--workload I --start-key 0 --keys 100 -o 1")
    # the UDF should eventually reach these 20 records, incrementing "testbin" to 2
    lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
                      "-ufn increment_bin_to_2 -ufv \\\"testbin\\\" --start-key 15 --keys 20 --random",
                      do_reset=False)
    assert(len(lib.scan_records()) == 100)
    lib.check_recs_exist_in_range(0, 15,
            lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 1))
    lib.check_recs_exist_in_range(15, 35,
            lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 2))
    lib.check_recs_exist_in_range(35, 100,
            lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 1))
def test_random_read_batch_async():
    lib.run_benchmark("--duration 1 --workload RU --start-key 0 --keys 100 " +
                      "--batch-size 16 --async")
    n_recs = len(lib.scan_records())
    # we must have created at least one record, and no more than 100
    assert(n_recs > 0 and n_recs <= 100)
    # now check that no records exist with keys outside the range 0-100:
    # count the number of records with keys between 0 and 100 and compare
    # against the total from the scan
    cnt = 0
    for key in range(0, 100):
        if lib.get_record(key) is not None:
            cnt += 1
    assert(cnt == n_recs)
def test_const_map_async():
    def check_bin(meta, key, bins):
        assert("bin_1" in bins)
        assert("bin_2" in bins)
        lib.obj_spec_is_I1(bins["bin_1"])
        lib.obj_spec_is_I1(bins["bin_2"] - 256)
    lib.reset()
    lib.upload_udf("test_module.lua", udf_module)
    # the UDF should eventually reach all 100 records, writing both "bin_1" and "bin_2"
    lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
                      "-ufn read_object -ufv \"{\\\"write_bin_1\\\":b,\\\"random_value\\\":I1}\" " +
                      "--start-key 0 --keys 100 --random --async",
                      do_reset=False)
    lib.check_for_range(0, 100, check_bin)
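# The -ufv argument above is an object spec rendered as a map, handed to the
# read_object UDF on each call. A hedged sketch of one such argument as
# Python data (the concrete values are made up; 'b' draws a random boolean
# and 'I1' draws a random 1-byte integer):
_example_read_object_arg = {
    "write_bin_1": True,  # from the 'b' spec entry
    "random_value": 200,  # from the 'I1' spec entry
}
# check_bin implies read_object writes "bin_1" as a 1-byte value and "bin_2"
# as that range shifted by 256; the offset is inferred from the assertion
# lib.obj_spec_is_I1(bins["bin_2"] - 256), not from the UDF source.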
def test_write_bins_random():
    def check_bins(b):
        assert(len(b) == 2)
        assert(type(b["testbin_2"]) is list)
        lib.obj_spec_is_D(b["testbin_2"][0])
        lib.obj_spec_is_S(b["testbin_2"][1], 10)
        lib.obj_spec_is_B(b["testbin_5"], 10)
    lib.run_benchmark("--workload RU --duration 1 --start-key 0 --keys 200 " +
                      "-o I5,[D,S10],I3,D,B10 --random --write-bins 2,5")
    # it's very, very likely that after 1 second, all 200 key values will
    # have been randomly chosen
    lib.check_for_range(0, 200, lambda meta, key, bins: check_bins(bins))
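# The bin names asserted throughout these tests follow the benchmark tool's
# naming convention for multi-bin object specs: the first entry maps to
# "testbin" and the N-th entry (1-indexed) to "testbin_N" for N > 1, so
# "--write-bins 2,5" touches only "testbin_2" and "testbin_5". A sketch of
# that mapping (the helper is made up for illustration, not part of lib):
def _bin_name(index, base="testbin"):
    # index is 1-based, matching the --write-bins argument
    return base if index == 1 else "%s_%d" % (base, index)

# e.g. for "-o I5,[D,S10],I3,D,B10" above, _bin_name(2) == "testbin_2"
# (the [D,S10] list) and _bin_name(5) == "testbin_5" (the B10 bytes value)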
def test_const_fixed_map():
    prev_bin_1 = -1
    def check_bin(meta, key, bins):
        nonlocal prev_bin_1
        assert("bin_1" in bins)
        lib.obj_spec_is_I1(bins["bin_1"])
        if prev_bin_1 == -1:
            prev_bin_1 = bins["bin_1"]
        else:
            assert(bins["bin_1"] == prev_bin_1)
    lib.reset()
    lib.upload_udf("test_module.lua", udf_module)
    # the UDF should eventually reach all 100 records, writing the same
    # fixed value to "bin_1" every time
    lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
                      "-ufn read_object -ufv \"{\\\"write_bin_1\\\":true,\\\"random_value\\\":I1}\" " +
                      "--start-key 0 --keys 100 -z 1",
                      do_reset=False)
    lib.check_for_range(0, 100, check_bin)
def test_write_bins_delete():
    def check_bins_before(b):
        assert(len(b) == 4)
        lib.obj_spec_is_I1(b["testbin"])
        lib.obj_spec_is_I2(b["testbin_2"])
        lib.obj_spec_is_I3(b["testbin_3"])
        lib.obj_spec_is_I4(b["testbin_4"])
    def check_bins_after(b):
        assert(len(b) == 2)
        lib.obj_spec_is_I2(b["testbin_2"])
        lib.obj_spec_is_I4(b["testbin_4"])
    lib.run_benchmark(
            "--workload I --start-key 0 --keys 100 -o I1,I2,I3,I4 --random")
    lib.check_for_range(0, 100, lambda meta, key, bins: check_bins_before(bins))
    lib.run_benchmark(
            "--workload DB --start-key 0 --keys 100 -o I1,I2,I3,I4 " +
            "--write-bins 1,3",
            do_reset=False)
    lib.check_for_range(0, 100, lambda meta, key, bins: check_bins_after(bins))
def test_set_bin_fixed_bool():
    true_cnt = 0
    false_cnt = 0
    def check_bin(meta, key, bins):
        nonlocal true_cnt
        nonlocal false_cnt
        assert("bool_bin" in bins)
        lib.obj_spec_is_b(bins["bool_bin"])
        if bins["bool_bin"]:
            true_cnt += 1
        else:
            false_cnt += 1
    lib.upload_udf("test_module.lua", udf_module)
    # initialize all records to have just one bin "testbin" with value 1
    lib.run_benchmark("--workload I --start-key 0 --keys 20 -o 1")
    # the UDF should eventually reach all 20 records, setting "bool_bin" on
    # each to the same fixed boolean value
    lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
                      "-ufn set_bin -ufv \\\"bool_bin\\\",b --start-key 0 --keys 20 -z 1",
                      do_reset=False)
    lib.check_for_range(0, 20, check_bin)
    # since the value is fixed, every record must have gotten the same boolean
    assert((true_cnt > 0 and false_cnt == 0) or (true_cnt == 0 and false_cnt > 0))
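# The final assertion above depends on fixed (as opposed to --random)
# obj-spec values: the 'b' in -ufv "bool_bin",b appears to be generated once
# per run and then reused for every UDF call, so all 20 records end up with
# a single boolean. A sketch of the property being asserted (the helper is
# illustrative only):
def _all_same(values):
    # True when every element equals the first; vacuously True when empty
    return all(v == values[0] for v in values)

# over the 20 visited records this is equivalent to the true_cnt/false_cnt
# check: exactly one of the two counters stays at zero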
def test_linear_write_start_key_async():
    lib.run_benchmark("--workload I --start-key 1000 --keys 1000 --async")
    lib.check_for_range(1000, 2000)
def test_tps_simple():
    # test throughput throttling with simple objects and one thread
    lib.run_benchmark("--workload RU,0.0001 --duration 5 --start-key 0 " +
                      "--keys 1000000000 -o I --throughput 1000 -z 1")
    n_records = len(lib.scan_records())
    assert(4950 <= n_records <= 5050)
def test_tps_read_write_async():
    # test throughput throttling with a 50/50 read/write mix of simple
    # objects on one thread
    lib.run_benchmark("--workload RU,50 --duration 5 --start-key 0 " +
                      "--keys 1000000000 -o I --throughput 1000 -z 1 --async")
    n_records = len(lib.scan_records())
    assert(2400 <= n_records <= 2600)
def test_linear_write_keys_async():
    lib.run_benchmark("--workload I --keys 100 --async")
    lib.check_for_range(1, 101)
def test_connect():
    # the only reason this would fail is if the tool can't connect to the
    # cluster
    lib.run_benchmark("--duration 0 --workload RU")
def test_no_connect():
    # tests running the benchmark when the cluster is unreachable
    lib.run_benchmark("--duration 0 --workload RU", ip="127.0.0.1", port=1000,
                      expect_success=False)