import lib

def test_tps_read_write_high_variance_async():
	# test throughput throttling writing complex objects and reading simple ones
	lib.run_benchmark("--workload RU,50 --duration 5 --start-key 0 " +
			"--keys 1000000000 -o \"I,{500*S64:[10*I,B128]}\" --throughput 500 " +
			"-z 1 --read-bins 1 --async")
	n_records = len(lib.scan_records())
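	# 500 TPS for 5 seconds is ~2500 transactions, about half of which are writes (~1250 records)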
	assert(1150 <= n_records <= 1350)
def test_tps_multithreaded_async():
	# test throughput throttling with simple objects and 16 threads
	lib.run_benchmark("--workload RU,0.0001 --duration 5 --start-key 0 " +
			"--keys 1000000000 -o I --throughput 1000 -z 16 --async")
	n_records = len(lib.scan_records())
	# there is much higher variance with multiple threads
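	# ~1000 TPS for 5 seconds with a nearly write-only workload (0.0001% reads) gives ~5000 records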
	assert(4950 <= n_records <= 5050)
def test_linear_delete_subset_async():
	# first fill up the database
	lib.run_benchmark("--workload I --start-key 0 --keys 1000")
	lib.check_for_range(0, 1000)
	# then delete a subset of the database
	lib.run_benchmark("--workload DB --start-key 300 --keys 500 --async",
			do_reset=False)
	lib.check_recs_exist_in_range(0, 300)
	lib.check_recs_exist_in_range(800, 1000)
	assert(len(lib.scan_records()) == 500)
def test_random_udf_subrange():
	lib.upload_udf("test_module.lua", udf_module)
	# initialize all records to have just one bin "testbin" with value 1
	lib.run_benchmark("--workload I --start-key 0 --keys 100 -o 1")
	# the UDF should eventually reach these 20 records, incrementing "testbin" to 2
	lib.run_benchmark("--duration 1 --workload RUF,0,0 -upn test_module " +
			"-ufn increment_bin_to_2 -ufv \\\"testbin\\\" --start-key 15 --keys 20 --random",
			do_reset=False)

	assert(len(lib.scan_records()) == 100)
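	# only keys 15-34 were targeted by the UDF, so only their "testbin" should have been incremented to 2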
	lib.check_recs_exist_in_range(0, 15, lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 1))
	lib.check_recs_exist_in_range(15, 35, lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 2))
	lib.check_recs_exist_in_range(35, 100, lambda meta, key, bins: lib.obj_spec_is_const_I(bins["testbin"], 1))
def test_random_read_batch_async():
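	# random read/update workload over keys 0-99 for 1 second with a batch size of 16, run asynchronously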
	lib.run_benchmark("--duration 1 --workload RU --start-key 0 --keys 100 --batch-size 16 --async")

	n_recs = len(lib.scan_records())
	# we must have created at least one record, and no more than 100
	assert(n_recs > 0 and n_recs <= 100)

	# verify that no records exist with keys outside the range 0-99 by counting
	# the records in that range and comparing against the scan total
	cnt = 0
	for key in range(0, 100):
		if lib.get_record(key) is not None:
			cnt += 1
	assert(cnt == n_recs)
def test_tps_read_write_async():
	# test throughput throttling with simple objects and one thread
	lib.run_benchmark("--workload RU,50 --duration 5 --start-key 0 " +
			"--keys 1000000000 -o I --throughput 1000 -z 1 --async")
	n_records = len(lib.scan_records())
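	# 1000 TPS for 5 seconds is ~5000 transactions, about half of which are writes (~2500 records)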
	assert(2400 <= n_records <= 2600)
def test_tps_simple():
	# test throughput throttling with simple objects and one thread
	lib.run_benchmark("--workload RU,0.0001 --duration 5 --start-key 0 " +
			"--keys 1000000000 -o I --throughput 1000 -z 1")
	n_records = len(lib.scan_records())
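	# ~1000 TPS for 5 seconds with a nearly write-only workload gives ~5000 records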
	assert(4950 <= n_records <= 5050)