def test_workload(self):
    """Seed the table with data, then reshard it through every shard count
    in opts["sequence"] while a client workload runs, verifying cluster
    health after each step."""
    server_conn = self.cluster[0]

    utils.print_with_time("Inserting data")
    rdb_workload_common.insert_many(conn=self.conn, table=self.table, count=10000)

    utils.print_with_time("Starting workload")
    with workload_runner.SplitOrContinuousWorkload(opts, server_conn, db_name=self.dbName, table_name=self.tableName) as workload:
        utils.print_with_time("Running workload before")
        workload.run_before()
        self.checkCluster()
        for shard_count in opts["sequence"]:
            utils.print_with_time("Sharding table to %d shards" % shard_count)
            # Reconfigure to the target shard count, fully replicated across nodes,
            # and block until every replica has caught up before re-checking health.
            self.table.reconfigure(shards=shard_count, replicas=opts["num-nodes"]).run(self.conn)
            self.table.wait(wait_for='all_replicas_ready').run(self.conn)
            self.checkCluster()
        utils.print_with_time("Running workload after")
        workload.run_after()
    self.checkCluster()
    utils.print_with_time("Workload complete")
# NOTE(review): this fragment begins mid-expression -- the "]" below closes a
# list literal (presumably the `processes` list of driver.Process objects)
# whose opening lines are outside this view.
]
# Give the freshly started servers time to find each other before
# administering the cluster (fixed sleeps; presumably no readiness API here).
time.sleep(10)
print "Creating table..."
# Drive the cluster through the HTTP admin interface, reachable on every node.
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
db = http.add_database(name="test")
ns = http.add_table(primary=dc, name="test", primary_key="foo")
time.sleep(10)
host, port = driver.get_table_host(processes)
cluster.check()
rdb_workload_common.insert_many(host=host, port=port, table="test", count=10000)
print "Splitting into two shards..."
# Split the key space at "t", wait for the cluster to settle, then verify.
http.add_table_shard(ns, "t")
time.sleep(10)
cluster.check()
print "Merging shards together again..."
# Undo the split and confirm the cluster survives the merge as well.
http.remove_table_shard(ns, "t")
time.sleep(10)
cluster.check()
cluster.check_and_stop()
# NOTE(review): fragment of a replication/backfill test; `http`, `dc`, `db`,
# `processes`, `cluster`, and `driver` are defined outside this view.
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
ns = http.add_table(primary=dc, name="stress", database=db)
time.sleep(3)
host, port = driver.get_table_host(processes)
cluster.check()
print "Increasing replication factor..."
# affinities maps datacenter -> number of secondary replicas to keep there.
http.set_table_affinities(ns, {dc: 1})
time.sleep(3)
cluster.check()
print "Inserting some data..."
rdb_workload_common.insert_many(host=host, port=port, database="test", table="stress", count=20000)
cluster.check()
print "Decreasing replication factor..."
http.set_table_affinities(ns, {dc: 0})
time.sleep(3)
cluster.check()
print "Increasing replication factor again..."
# Re-adding the replica should force a backfill of the 20000 rows.
http.set_table_affinities(ns, {dc: 1})
print "Confirming that the progress meter indicates a backfill happening..."
# Poll the progress endpoint; the loop body continues beyond this view
# (presumably it breaks once non-empty progress is observed -- confirm).
for i in xrange(100):
    progress = http.get_progress()
# NOTE(review): fragment of a test setup routine; `dbName`, `tableName`,
# `startTime`, `conn`, `server`, `numNodes`, `r`, and `workload_runner` are
# defined outside this view. Timestamps are seconds elapsed since startTime.
print("Creating db/table %s/%s (%.2fs)" % (dbName, tableName, time.time() - startTime))
# Create the database if missing, and recreate the table from scratch so the
# test starts from a known-empty state.
if dbName not in r.db_list().run(conn):
    r.db_create(dbName).run(conn)
if tableName in r.db(dbName).table_list().run(conn):
    r.db(dbName).table_drop(tableName).run(conn)
r.db(dbName).table_create(tableName).run(conn)
print("Inserting data (%.2fs)" % (time.time() - startTime))
# NOTE(review): both host/port AND conn are passed here; verify which one
# insert_many actually uses -- passing both looks redundant or conflicting.
rdb_workload_common.insert_many(host=server.host, port=server.driver_port, database=dbName, table=tableName, count=10000, conn=conn)
print("Sharding table (%.2fs)" % (time.time() - startTime))
# Reconfigure every table in the db to one shard per node, fully replicated,
# then block until the reconfiguration is complete.
r.db(dbName).reconfigure(shards=numNodes, replicas=numNodes).run(conn)
r.db(dbName).wait().run(conn)
print("Starting workload (%.2fs)" % (time.time() - startTime))
workload_ports = workload_runner.RDBPorts(host=server.host, http_port=server.http_port, rdb_port=server.driver_port, db_name=dbName, table_name=tableName)
#!/usr/bin/env python
# Standalone workload script: bulk-insert rows into a table reachable via the
# shared connect options. Row count is controlled by --count (default 10000).
import sys, socket, random, time, os

# Make the sibling "common" directory importable so the shared test helpers
# can be loaded regardless of the current working directory.
_common_dir = os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')
sys.path.append(os.path.abspath(_common_dir))
import rdb_workload_common
from vcoptparse import *

op = rdb_workload_common.option_parser_for_connect()
op["count"] = IntFlag("--count", 10000)
opts = op.parse(sys.argv)

if __name__ == '__main__':
    with rdb_workload_common.make_table_and_connection(opts) as (table, conn):
        rdb_workload_common.insert_many(conn=conn, table=table, count=opts['count'])
# NOTE(review): fragment of a primary-pinning test; `process1`, `process2`,
# `cluster`, `driver`, `opts`, `files1`, `files2`, `executable_path`,
# `command_prefix`, and `serve_options` are defined outside this view.
print "Creating namespace..."
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in [process1, process2]])
dc = http.add_datacenter()
http.move_server_to_datacenter(process1.files.machine_name, dc)
http.move_server_to_datacenter(process2.files.machine_name, dc)
db = http.add_database()
# One secondary replica in dc, but require acks from two machines.
ns = http.add_table(primary = dc, affinities = {dc: 1}, ack_expectations = {dc: 2}, database=db.name)
# Pin the primary for the whole key range ("" to null) onto process1's machine.
http.do_query("POST", "/ajax/semilattice/rdb_namespaces/%s/primary_pinnings" % ns.uuid, {"[\"\",null]": http.find_machine(process1.files.machine_name).uuid})
http.wait_until_blueprint_satisfied(ns)
cluster.check()
http.check_no_issues()
host, port = driver.get_table_host([process1, process2])
rdb_workload_common.insert_many(host=host, port=port, database=db.name, table=ns.name, count=10000)
if opts["fast-workload"]:
    # Swap the release-mode servers for original-mode ones, reusing the same
    # data files so the table contents survive the restart.
    print "Stopping release-mode processes."
    process1.check_and_stop()
    process2.check_and_stop()
    print "Starting original-mode processes."
    process1 = driver.Process(cluster, files1, log_path = "serve-output-1", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    process2 = driver.Process(cluster, files2, log_path = "serve-output-2", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    process1.wait_until_started_up()
    process2.wait_until_started_up()
    # Re-establish admin access and re-resolve the namespace on the new processes.
    http = http_admin.ClusterAccess([("localhost", p.http_port) for p in [process1, process2]])
    ns = http.find_namespace(ns.name)
    print "OK, fast workload logic has done its job."
#!/usr/bin/env python
"""Bulk-insert a configurable number of documents into a RethinkDB table.

Connection and table options come from rdb_workload_common's shared option
parser; --count (default 10000) sets how many rows are inserted.
"""
import sys, socket, random, time, os

# Put the sibling "common" directory on the import path for the test helpers.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import rdb_workload_common
from vcoptparse import *

op = rdb_workload_common.option_parser_for_connect()
op["count"] = IntFlag("--count", 10000)
opts = op.parse(sys.argv)

if __name__ == '__main__':
    # make_table_and_connection yields a ready (table, conn) pair and
    # handles cleanup on exit.
    with rdb_workload_common.make_table_and_connection(opts) as (table, conn):
        rdb_workload_common.insert_many(conn=conn, table=table, count=opts['count'])
# NOTE(review): near-duplicate of the primary-pinning setup earlier in this
# file; this fragment is cut off mid-call at the second driver.Process(...).
# One secondary replica in dc, but require acks from two machines.
ns = http.add_table(primary=dc, affinities={dc: 1}, ack_expectations={dc: 2}, database=db.name)
# Pin the primary for the whole key range ("" to null) onto process1's machine.
http.do_query(
    "POST",
    "/ajax/semilattice/rdb_namespaces/%s/primary_pinnings" % ns.uuid,
    {"[\"\",null]": http.find_machine(process1.files.machine_name).uuid})
http.wait_until_blueprint_satisfied(ns)
cluster.check()
http.check_no_issues()
host, port = driver.get_table_host([process1, process2])
rdb_workload_common.insert_many(host=host, port=port, database=db.name, table=ns.name, count=10000)
if opts["fast-workload"]:
    # Swap the release-mode servers for original-mode ones, reusing the same
    # data files so the table contents survive the restart.
    print "Stopping release-mode processes."
    process1.check_and_stop()
    process2.check_and_stop()
    print "Starting original-mode processes."
    process1 = driver.Process(cluster, files1, log_path="serve-output-1", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    process2 = driver.Process(cluster,
# NOTE(review): variant of the replication/backfill fragment earlier in this
# file; `http`, `processes`, `cluster`, and `driver` are defined outside
# this view, and the polling loop is cut off at the end.
db = http.add_database("test")
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
ns = http.add_table(primary = dc, name = "stress", database = db)
time.sleep(3)
host, port = driver.get_table_host(processes)
cluster.check()
print "Increasing replication factor..."
# affinities maps datacenter -> number of secondary replicas to keep there.
http.set_table_affinities(ns, {dc: 1})
time.sleep(3)
cluster.check()
print "Inserting some data..."
rdb_workload_common.insert_many(host=host, port=port, database="test", table="stress", count=20000)
cluster.check()
print "Decreasing replication factor..."
http.set_table_affinities(ns, {dc: 0})
time.sleep(3)
cluster.check()
print "Increasing replication factor again..."
# Re-adding the replica should force a backfill of the 20000 rows.
http.set_table_affinities(ns, {dc: 1})
print "Confirming that the progress meter indicates a backfill happening..."
# Poll for visible backfill progress; the loop presumably breaks after
# printing "OK" -- the continuation is outside this view.
for i in xrange(100):
    progress = http.get_progress()
    if len(progress) > 0:
        print "OK"
# NOTE(review): fragment begins mid-expression -- these are the trailing
# arguments of a driver.Process(...) call inside a list comprehension
# (presumably building `processes`); the opening of that call is outside
# this view.
               cluster, files[i], log_path = "serve-output-%d" % i,
               executable_path = driver.find_rethinkdb_executable())
           for i in xrange(num_nodes)]
# Give the freshly started servers time to find each other before
# administering the cluster (fixed sleeps; presumably no readiness API here).
time.sleep(10)
print "Creating table..."
# Drive the cluster through the HTTP admin interface, reachable on every node.
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
db = http.add_database(name="test")
ns = http.add_table(primary = dc, name="test", primary_key="foo")
time.sleep(10)
host, port = driver.get_table_host(processes)
cluster.check()
rdb_workload_common.insert_many(host=host, port=port, table="test", count=10000)
print "Splitting into two shards..."
# Split the key space at "t", wait for the cluster to settle, then verify.
http.add_table_shard(ns, "t")
time.sleep(10)
cluster.check()
print "Merging shards together again..."
# Undo the split and confirm the cluster survives the merge as well.
http.remove_table_shard(ns, "t")
time.sleep(10)
cluster.check()
cluster.check_and_stop()