def test_workload(self):
    """Run the configured workload, optionally routing it through a proxy node."""
    target = self.cluster[0]

    # -- route the workload through a freshly started proxy when requested
    if opts["use-proxy"]:
        utils.print_with_time('Using proxy')
        target = driver.ProxyProcess(
            self.cluster, console_output='proxy-output',
            command_prefix=command_prefix, extra_options=serve_options)
        self.cluster.wait_until_ready()

    # -- drive the workload against the chosen endpoint
    workload_runner.run(
        opts["workload"], target, opts["timeout"],
        db_name=self.dbName, table_name=self.tableName)
    utils.print_with_time("Ended workload: %s" % opts["workload"])
def test_workload(self):
    """Run one workload, restart the server, wait for replicas, run a second workload."""
    node = self.cluster[0]

    def run_workload(key):
        # Execute the workload named by opts[key] against the target db/table.
        workload_runner.run(
            opts[key], node, opts["timeout"],
            db_name=self.dbName, table_name=self.tableName)

    utils.print_with_time("Running first workload")
    run_workload("workload1")

    utils.print_with_time("Restarting server")
    node.check_and_stop()
    node.start()
    self.cluster.check()
    # Block until every replica of the test db is ready before continuing.
    self.r.db(self.dbName).wait(wait_for="all_replicas_ready").run(self.conn)

    utils.print_with_time("Running second workload")
    run_workload("workload2")
def test_workload(self):
    """Run a workload, bounce the server, wait for readiness, then run another."""
    node = self.cluster[0]

    utils.print_with_time("Running first workload")
    workload_runner.run(
        opts["workload1"], node, opts["timeout"],
        db_name=self.dbName, table_name=self.tableName)

    utils.print_with_time("Restarting server")
    node.check_and_stop()
    node.start()
    self.cluster.check()
    # Wait (cluster-wide) for tables to become ready again after the restart.
    self.r.wait().run(self.conn)

    utils.print_with_time("Running second workload")
    workload_runner.run(
        opts["workload2"], node, opts["timeout"],
        db_name=self.dbName, table_name=self.tableName)
# -- phase 1: bring up a single server and run the first workload against it

print "Starting cluster..."
# On-disk state and log locations for the first server.
files1 = driver.Files(metacluster, db_path = "db-first", log_path = "create-output-first", executable_path = executable_path, command_prefix = command_prefix)
process1 = driver.Process(cluster, files1, log_path = "serve-output-first", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
process1.wait_until_started_up()

print "Creating namespace..."
# Administer the cluster via the first server's HTTP admin port.
http1 = http_admin.ClusterAccess([("localhost", process1.http_port)])
dc = http1.add_datacenter()
http1.move_server_to_datacenter(files1.machine_name, dc)
ns = scenario_common.prepare_table_for_workload(opts, http1, primary = dc)
http1.wait_until_blueprint_satisfied(ns)

# Run the first workload against the single-server table.
workload_ports_1 = scenario_common.get_workload_ports(opts, ns, [process1])
workload_runner.run(opts["protocol"], opts["workload1"], workload_ports_1, opts["timeout"])

# -- phase 2: add a second server and wait for data to backfill onto it

print "Bringing up new server..."
files2 = driver.Files(metacluster, db_path = "db-second", log_path = "create-output-second", executable_path = executable_path, command_prefix = command_prefix)
process2 = driver.Process(cluster, files2, log_path = "serve-output-second", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
process2.wait_until_started_up()
http1.update_cluster_data(3)
http1.move_server_to_datacenter(files2.machine_name, dc)
# Presumably requests one replica in the datacenter so the new server receives
# a copy -- confirm against http_admin.set_namespace_affinities.
http1.set_namespace_affinities(ns, {dc: 1})
http1.check_no_issues()

print "Waiting for backfill..."
# Time the backfill; allow up to an hour for the blueprint to be satisfied.
backfill_start_time = time.time()
http1.wait_until_blueprint_satisfied(ns, timeout = 3600)
# NOTE(review): this chunk opens mid-expression -- the `driver.Files(...)` call
# these keyword arguments belong to begins outside the visible region.
log_path="create-output", executable_path=executable_path, command_prefix=command_prefix)

# Start a server on the prepared files and wait for it to come up.
process = driver.Process(cluster, files, executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
process.wait_until_started_up()

print "Creating namespace..."
http = http_admin.ClusterAccess([("localhost", process.http_port)])
dc = http.add_datacenter()
http.move_server_to_datacenter(http.machines.keys()[0], dc)
ns = scenario_common.prepare_table_for_workload(opts, http, primary=dc)
http.wait_until_blueprint_satisfied(ns)

# First workload against the freshly created table.
workload_ports = scenario_common.get_workload_ports(opts, ns, [process])
workload_runner.run(opts["protocol"], opts["workload1"], workload_ports, opts["timeout"])

print "Restarting server..."
# Stop the server cleanly, then start a new process on the SAME data files,
# so the second workload exercises recovery from persisted state.
process.check_and_stop()
process2 = driver.Process(cluster, files, executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
process2.wait_until_started_up()
http.wait_until_blueprint_satisfied(ns)

# Second workload must succeed against the restarted server.
workload_runner.run(opts["protocol"], opts["workload2"], workload_ports, opts["timeout"])
cluster.check_and_stop()
utils.print_with_time("Establishing ReQL connection")
conn1 = r.connect(server1.host, server1.driver_port)

utils.print_with_time("Creating db/table %s/%s" % (dbName, tableName))
if dbName not in r.db_list().run(conn1):
    r.db_create(dbName).run(conn1)
# Drop any stale table so the workload starts against a fresh one.
if tableName in r.db(dbName).table_list().run(conn1):
    r.db(dbName).table_drop(tableName).run(conn1)
r.db(dbName).table_create(tableName).run(conn1)

utils.print_with_time("Starting first workload")
workload_runner.run(opts["workload1"], workload_ports1, opts["timeout"])

utils.print_with_time("Bringing up new server")
server2 = driver.Process(cluster=cluster, name='second', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True)

# The cluster must report no issues after the new server joins
# (memory errors are explicitly tolerated by the filter).
issues = list(r.db('rethinkdb').table('current_issues').filter(r.row["type"] != "memory_error").run(conn1))
assert [] == issues, 'The issues list was not empty: %s' % repr(issues)

utils.print_with_time("Explicitly adding server to the table")
utils.print_with_time("Establishing ReQL connection")
conn1 = r.connect(server1.host, server1.driver_port)

utils.print_with_time("Creating db/table %s/%s" % (dbName, tableName))
if dbName not in r.db_list().run(conn1):
    r.db_create(dbName).run(conn1)
# Drop any stale table so the workload starts against a fresh one.
if tableName in r.db(dbName).table_list().run(conn1):
    r.db(dbName).table_drop(tableName).run(conn1)
r.db(dbName).table_create(tableName).run(conn1)

utils.print_with_time("Starting first workload")
workload_runner.run(opts["workload1"], workload_ports1, opts["timeout"])

utils.print_with_time("Bringing up new server")
server2 = driver.Process(cluster=cluster, name='second', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True)

# The cluster must report no issues after the new server joins
# (memory errors are explicitly tolerated by the filter).
issues = list(r.db('rethinkdb').table('current_issues').filter(r.row["type"] != "memory_error").run(conn1))
assert [] == issues, 'The issues list was not empty: %s' % repr(issues)

utils.print_with_time("Explicitly adding server to the table")
# Rewrite the table config so the new server is primary and both servers
# hold replicas; the update must report zero errors.
assert r.db(dbName).table(tableName).config() \
    .update({'shards':[ {'primary_replica':server2.name, 'replicas':[server2.name, server1.name]} ]})['errors'].run(conn1) == 0

utils.print_with_time("Waiting for backfill")
# NOTE(review): this chunk opens mid-expression -- the call these keyword
# arguments close (presumably `serve_files = driver.Files(...)`) begins
# outside the visible region.
executable_path=executable_path, command_prefix=command_prefix)

# One full server plus one proxy node.
serve_process = driver.Process(cluster, serve_files, log_path="serve-output", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
proxy_process = driver.ProxyProcess(cluster, 'proxy-logfile', log_path='proxy-output', executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
processes = [serve_process, proxy_process]
for process in processes:
    process.wait_until_started_up()

print "Creating table..."
# Administer the cluster through the proxy's HTTP port.
http = http_admin.ClusterAccess([("localhost", proxy_process.http_port)])
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
ns = scenario_common.prepare_table_for_workload(http, primary=dc)
http.wait_until_blueprint_satisfied(ns)

# Run the workload exclusively through the proxy.
workload_ports = scenario_common.get_workload_ports(ns, [proxy_process])
workload_runner.run(opts["workload"], workload_ports, opts["timeout"])
cluster.check_and_stop()
# NOTE(review): this chunk opens mid-expression -- the list comprehension this
# `for` clause terminates (presumably building `processes`) begins outside the
# visible region.
           for i in xrange(num_nodes)]
# Fixed settle delay -- presumably gives the servers time to connect; confirm.
time.sleep(10)

print "Creating namespace..."
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
ns = http.add_namespace(protocol = "memcached", primary = dc)
time.sleep(10)
host, port = driver.get_namespace_host(ns.port, processes)
cluster.check()

# Drive the memcached workload at the first server (namespace port + that
# server's port offset).
workload_ports = workload_runner.MemcachedPorts("localhost", processes[0].http_port, ns.port + processes[0].port_offset)
workload_runner.run("memcached", opts["workload"], workload_ports, opts["timeout"])
cluster.check()

print "Splitting into two shards..."
# Split at key "t", wait for the change to settle, then verify the cluster.
http.add_namespace_shard(ns, "t")
time.sleep(10)
cluster.check()

print "Merging shards together again..."
http.remove_namespace_shard(ns, "t")
time.sleep(10)
cluster.check()
cluster.check_and_stop()
print "Starting cluster..."
# One server process per requested node, each with its own data dir and logs.
processes = [driver.Process(cluster,
                            driver.Files(metacluster, db_path = "db-%d" % i, log_path = "create-output-%d" % i, executable_path = executable_path, command_prefix = command_prefix),
                            log_path = "serve-output-%d" % i, executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
             for i in xrange(opts["num-nodes"])]
# Optionally add a proxy node; the workload is routed through it below.
if opts["use-proxy"]:
    proxy_process = driver.ProxyProcess(cluster, 'proxy-logfile', log_path = 'proxy-output', executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    processes.append(proxy_process)
for process in processes:
    process.wait_until_started_up()

print "Creating table..."
http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])
dc = http.add_datacenter()
for machine_id in http.machines:
    http.move_server_to_datacenter(machine_id, dc)
ns = scenario_common.prepare_table_for_workload(http, primary = dc)
# Split into opts["num-shards"] shards at letter boundaries.
# NOTE(review): for i == 0 the split point is 'a' itself, which looks like a
# degenerate split -- `(i + 1)` may have been intended; confirm.
for i in xrange(opts["num-shards"] - 1):
    http.add_table_shard(ns, chr(ord('a') + 26 * i // opts["num-shards"]))
http.wait_until_blueprint_satisfied(ns)

# Run through the proxy when one was started, otherwise against all servers.
workload_ports = scenario_common.get_workload_ports(ns, processes if not opts["use-proxy"] else [proxy_process])
workload_runner.run(opts["workload"], workload_ports, opts["timeout"])
cluster.check_and_stop()