command_prefix=command_prefix) process = driver.Process(cluster, files, executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options) process.wait_until_started_up() print "Creating table..." with r.connect('localhost', process.driver_port) as conn: r.db_create('test').run(conn) r.db('test').table_create('restart').run(conn) ns = TableShim(name='restart') workload_ports = scenario_common.get_workload_ports(ns, [process]) workload_runner.run(opts["workload1"], workload_ports, opts["timeout"]) print "Restarting server..." process.check_and_stop() process2 = driver.Process(cluster, files, executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options) process2.wait_until_started_up() cluster.check() rdb_workload_common.wait_for_table(host="localhost", port=process2.driver_port, table=ns.name) workload_ports2 = scenario_common.get_workload_ports(ns, [process2]) workload_runner.run(opts["workload2"], workload_ports2, opts["timeout"]) print "Shutting down..." cluster.check_and_stop()
for process in processes:
    process.wait_until_started_up()

print("Creating table...")
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])

# Two datacenters: the table starts in one, then migrates to the other
# while a workload is (or has been) running against it.
primary_dc = http.add_datacenter()
secondary_dc = http.add_datacenter()
machines = http.machines.keys()
http.move_server_to_datacenter(machines[0], primary_dc)
http.move_server_to_datacenter(machines[1], secondary_dc)

ns = scenario_common.prepare_table_for_workload(http, primary=primary_dc)
http.wait_until_blueprint_satisfied(ns)
cluster.check()
http.check_no_issues()

workload_ports = scenario_common.get_workload_ports(ns, processes)
with workload_runner.SplitOrContinuousWorkload(opts, workload_ports) as workload:
    workload.run_before()
    cluster.check()
    http.check_no_issues()

    # Move the table to the other datacenter between workload phases, then
    # wait until the new blueprint is satisfied and the table is reachable.
    http.move_table_to_datacenter(ns, secondary_dc)
    http.wait_until_blueprint_satisfied(ns)
    rdb_workload_common.wait_for_table(host=workload_ports.host,
                                       port=workload_ports.rdb_port,
                                       table=workload_ports.table_name)
    cluster.check()
    http.check_no_issues()
    workload.run_after()

cluster.check_and_stop()
# Minimal stand-in for a table handle: the workload helpers only need .name.
TableShim = collections.namedtuple('TableShim', ['name'])

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)

    print("Starting cluster...")
    files = driver.Files(metacluster,
                         log_path="create-output",
                         executable_path=executable_path,
                         command_prefix=command_prefix)
    process = driver.Process(cluster, files,
                             executable_path=executable_path,
                             command_prefix=command_prefix,
                             extra_options=serve_options)
    process.wait_until_started_up()

    print("Creating table...")
    with r.connect('localhost', process.driver_port) as conn:
        r.db_create('test').run(conn)
        r.db('test').table_create('restart').run(conn)
    ns = TableShim(name='restart')

    # First workload runs against the freshly created table.
    workload_ports = scenario_common.get_workload_ports(ns, [process])
    workload_runner.run(opts["workload1"], workload_ports, opts["timeout"])

    print("Restarting server...")
    process.check_and_stop()
    # Start a second process on the SAME data files to exercise recovery.
    process2 = driver.Process(cluster, files,
                              executable_path=executable_path,
                              command_prefix=command_prefix,
                              extra_options=serve_options)
    process2.wait_until_started_up()
    cluster.check()

    # The table must be reachable again after the restart before workload2 runs.
    rdb_workload_common.wait_for_table(host="localhost",
                                       port=process2.driver_port,
                                       table=ns.name)
    workload_ports2 = scenario_common.get_workload_ports(ns, [process2])
    workload_runner.run(opts["workload2"], workload_ports2, opts["timeout"])

    print("Shutting down...")
    cluster.check_and_stop()