def run_tests(build=None, data_dir='./'):
    """Run the benchmark suite against every configured server.

    Starts one rethinkdb server per entry in the module-level
    ``servers_settings`` list (each with its own ``--cache-size``), connects,
    initializes the tables, runs the read/write queries for each
    configuration, runs the constant queries once (first configuration only),
    and finally saves the comparison results.

    Parameters:
        build    -- path to a build directory containing a ``rethinkdb``
                    binary; when None the executable is located via
                    ``utils.find_rethinkdb_executable()``.
        data_dir -- directory that will hold per-server data files
                    (created if missing).

    Raises:
        ValueError -- if data_dir exists but is not a directory.

    Side effects: rebinds the module-level ``connection`` global.
    """
    global connection, servers_settings
    # EAFP: attempt creation and let the isdir() check below catch real
    # failures -- avoids the exists()/makedirs() race of the LBYL form.
    try:
        os.makedirs(data_dir)
    except OSError:
        pass  # already exists, or creation failed (caught just below)
    if not os.path.isdir(data_dir):
        raise ValueError('data_dir is not a directory: %s' % str(data_dir))
    executable_path = utils.find_rethinkdb_executable() if build is None else os.path.realpath(os.path.join(build, 'rethinkdb'))
    # Benchmark numbers are only meaningful for release builds, so warn
    # loudly when pointed at anything else.
    if not os.path.basename(os.path.dirname(executable_path)).startswith('release'):
        sys.stderr.write('Warning: Testing a non-release build: %s\n' % executable_path)
    else:
        print('Testing: %s' % executable_path)
    for run_index, settings in enumerate(servers_settings):
        print("Starting server with cache_size " + str(settings["cache_size"]) + " MB...", end=' ')
        sys.stdout.flush()
        serverFiles = driver.Files(server_name=settings["name"], db_path=os.path.join(data_dir, settings["name"]))
        with driver.Process(files=serverFiles, executable_path=executable_path, extra_options=['--cache-size', str(settings["cache_size"])]) as server:
            print(" Done.\nConnecting...", end=' ')
            sys.stdout.flush()
            connection = r.connect(host="localhost", port=server.driver_port)
            print(" Done.")
            sys.stdout.flush()
            init_tables(connection)
            # Tests
            execute_read_write_queries(settings["name"])
            # The constant queries do not depend on cache size, so run them
            # only against the first server configuration.
            if run_index == 0:
                execute_constant_queries()
    save_compare_results()
#!/usr/bin/python -u # Copyright 2010-2012 RethinkDB, all rights reserved. import sys, os, time, tempfile, subprocess rethinkdb_root = os.path.abspath( os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) sys.path.append(os.path.join(rethinkdb_root, "test", "common")) import http_admin, driver from vcoptparse import * with driver.Metacluster() as metacluster: cluster = driver.Cluster(metacluster) print "Starting cluster..." num_nodes = 2 files = [ driver.Files(metacluster, db_path="db-%d" % i, log_path="create-output-%d" % i) for i in xrange(num_nodes) ] processes = [ driver.Process(cluster, files[i], log_path="serve-output-%d" % i, executable_path=driver.find_rethinkdb_executable()) for i in xrange(num_nodes) ] time.sleep(3) print "Creating table..." http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes]) db = http.add_database("test")
# Scenario scaffolding (Python 2 script): start a two-server cluster
# (primary + secondary) with mode flags and a split/continuous workload
# option parser.
import http_admin, driver, workload_runner, scenario_common
from vcoptparse import *

op = OptParser()
workload_runner.prepare_option_parser_for_split_or_continuous_workload(op)
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    print "Starting cluster..."
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    primary_files = driver.Files(metacluster, db_path="db-1", log_path="create-output-1", executable_path=executable_path, command_prefix=command_prefix)
    primary = driver.Process(cluster, primary_files, log_path="serve-output-1", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    secondary_files = driver.Files(metacluster, db_path="db-2", log_path="create-output-2", executable_path=executable_path, command_prefix=command_prefix)
    # (call continues past this chunk)
    secondary = driver.Process(cluster, secondary_files,
# Scenario (Python 2 script): start --num-nodes servers, place the first two
# machines in separate datacenters, and create a table whose primary lives
# in the first datacenter.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import http_admin, driver, workload_runner, scenario_common, rdb_workload_common
from vcoptparse import *

op = OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
workload_runner.prepare_option_parser_for_split_or_continuous_workload(op)
op["num-nodes"] = IntFlag("--num-nodes", 2)  # default: one server per datacenter
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print "Starting cluster..."
    processes = [driver.Process(cluster, driver.Files(metacluster, db_path = "db-%d" % i, log_path = "create-output-%d" % i, executable_path = executable_path, command_prefix = command_prefix), log_path = "serve-output-%d" % i, executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options) for i in xrange(opts["num-nodes"])]
    # Block until every server reports ready before touching the admin API.
    for process in processes:
        process.wait_until_started_up()
    print "Creating table..."
    http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes])
    primary_dc = http.add_datacenter()
    secondary_dc = http.add_datacenter()
    # NOTE(review): indexing machines[0]/machines[1] assumes num-nodes >= 2.
    machines = http.machines.keys()
    http.move_server_to_datacenter(machines[0], primary_dc)
    http.move_server_to_datacenter(machines[1], secondary_dc)
    # NOTE(review): sibling scenarios call prepare_table_for_workload(opts,
    # http, ...) -- here opts is not passed; confirm the expected signature.
    ns = scenario_common.prepare_table_for_workload(http, primary = primary_dc)
    http.wait_until_blueprint_satisfied(ns)
# Scenario (Python 2 script): run workload1 against a single-server
# namespace; workload2 presumably runs after a transition past this chunk.
from vcoptparse import *

op = OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
op["workload1"] = PositionalArg()
op["workload2"] = PositionalArg()
op["timeout"] = IntFlag("--timeout", 600)  # seconds allowed per workload
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print "Starting cluster..."
    files = driver.Files(metacluster, log_path="create-output", executable_path=executable_path, command_prefix=command_prefix)
    process = driver.Process(cluster, files, executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    process.wait_until_started_up()
    print "Creating namespace..."
    http = http_admin.ClusterAccess([("localhost", process.http_port)])
    dc = http.add_datacenter()
    # Single machine: move the only entry into the new datacenter.
    http.move_server_to_datacenter(http.machines.keys()[0], dc)
    ns = scenario_common.prepare_table_for_workload(opts, http, primary=dc)
    http.wait_until_blueprint_satisfied(ns)
    workload_ports = scenario_common.get_workload_ports(opts, ns, [process])
    # (call continues past this chunk)
    workload_runner.run(opts["protocol"], opts["workload1"], workload_ports,
# Spin up one server per entry in server_names, wait for all of them to be
# ready, and leave a driver connection open (.repl()) to the first one.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    _, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print('Spinning up %d processes...' % len(server_names))
    servers = []
    for i, server_name in enumerate(server_names):
        info = {'name': server_name}
        info['files'] = driver.Files(
            metacluster,
            db_path='db-%d' % i,
            console_output='create-output-%d' % i,
            server_name=info['name'],
            command_prefix=command_prefix)
        info['process'] = driver.Process(
            cluster,
            info['files'],
            console_output='serve-output-%d' % i,
            command_prefix=command_prefix,
            extra_options=serve_options)
        servers.append(info)
    # Only connect once every server reports ready.
    for server in servers:
        server['process'].wait_until_started_up()
    r.connect(servers[0]['process'].host, servers[0]['process'].driver_port).repl()
from memcached_workload_common import MemcacheConnection from vcoptparse import * op = OptParser() scenario_common.prepare_option_parser_mode_flags(op) opts = op.parse(sys.argv) with driver.Metacluster() as metacluster: cluster = driver.Cluster(metacluster) executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags( opts) print "Starting cluster..." processes = [ driver.Process(cluster, driver.Files(metacluster, log_path=("create-output-%d" % (i + 1)), executable_path=executable_path, command_prefix=command_prefix), executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options) for i in xrange(2) ] for process in processes: process.wait_until_started_up() print "Creating namespace..." http = http_admin.ClusterAccess([("localhost", p.http_port) for p in processes]) dc = http.add_datacenter() for machine_id in http.machines: http.move_server_to_datacenter(machine_id, dc) ns = http.add_namespace(protocol="memcached", primary=dc) http.wait_until_blueprint_satisfied(ns)
# Replica-sequence scenario: one primary plus numReplicas replica servers
# (the replica list continues past this chunk).
op["sequence"] = vcoptparse.PositionalArg(converter=ReplicaSequence)
opts = op.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
# NOTE(review): 'peak()' -- presumably the maximum replica count in the
# sequence; confirm against ReplicaSequence (possibly a typo for peek()).
numReplicas = opts["sequence"].peak()

print('Starting cluster (%.2fs)' % (time.time() - startTime))
with driver.Cluster(output_folder='.') as cluster:
    print('Starting primary server (%.2fs)' % (time.time() - startTime))
    primary_files = driver.Files(cluster.metacluster, db_path="db-primary", console_output=True, command_prefix=command_prefix)
    primary_process = driver.Process(cluster, primary_files, console_output=True, command_prefix=command_prefix, extra_options=serve_options)
    print('Starting %d replicas (%.2fs)' % (numReplicas, time.time() - startTime))
    # (list continues past this chunk)
    replica_processes = [
        driver.Process(cluster=cluster, console_output=True, command_prefix=command_prefix, extra_options=serve_options)
# Scenario (Python 2 script): run workload1 against a first server, then
# bring up a new server (the migration continues past this chunk).
import http_admin, driver, workload_runner, scenario_common
from vcoptparse import *

op = OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
op["workload1"] = PositionalArg()
op["workload2"] = PositionalArg()
op["timeout"] = IntFlag("--timeout", 600)  # seconds allowed per workload
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print "Starting cluster..."
    files1 = driver.Files(metacluster, db_path = "db-first", log_path = "create-output-first", executable_path = executable_path, command_prefix = command_prefix)
    process1 = driver.Process(cluster, files1, log_path = "serve-output-first", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    process1.wait_until_started_up()
    print "Creating namespace..."
    http1 = http_admin.ClusterAccess([("localhost", process1.http_port)])
    dc = http1.add_datacenter()
    http1.move_server_to_datacenter(files1.machine_name, dc)
    ns = scenario_common.prepare_table_for_workload(opts, http1, primary = dc)
    http1.wait_until_blueprint_satisfied(ns)
    workload_ports_1 = scenario_common.get_workload_ports(opts, ns, [process1])
    # First workload runs against the single original server.
    workload_runner.run(opts["protocol"], opts["workload1"], workload_ports_1, opts["timeout"])
    print "Bringing up new server..."
# Scenario (Python 2 script): a primary server plus a list of replicas
# (the replica list comprehension continues past this chunk).
op = OptParser()
workload_runner.prepare_option_parser_for_split_or_continuous_workload(op, allow_between=True)
scenario_common.prepare_option_parser_mode_flags(op)
op["sequence"] = PositionalArg(converter=ReplicaSequence)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    print "Starting cluster..."
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    primary_files = driver.Files(metacluster, db_path="db-primary", log_path="create-db-primary-output", executable_path=executable_path, command_prefix=command_prefix)
    primary_process = driver.Process(cluster, primary_files, log_path="serve-output-primary", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    # (list comprehension continues past this chunk)
    replica_processes = [
        driver.Process(cluster,
                       driver.Files(metacluster, db_path="db-%d" % i, log_path="create-output-%d" % i, executable_path=executable_path, command_prefix=command_prefix),
# (continuation of a sys.path.append(os.path.abspath(...)) call started
# before this chunk)
os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, http_admin, scenario_common
from vcoptparse import *

op = OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print "Spinning up two processes..."
    # Two named servers; the names suggest KingHamlet is killed later in the
    # scenario (past this chunk) -- TODO confirm.
    prince_hamlet_files = driver.Files(metacluster, machine_name="PrinceHamlet", db_path="prince-hamlet-db", log_path="prince-hamlet-create-output", executable_path=executable_path, command_prefix=command_prefix)
    prince_hamlet = driver.Process(cluster, prince_hamlet_files, log_path="prince-hamlet-log", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    king_hamlet_files = driver.Files(metacluster, machine_name="KingHamlet", db_path="king-hamlet-db", log_path="king-hamlet-create-output", executable_path=executable_path, command_prefix=command_prefix)
    # (call continues past this chunk)
    king_hamlet = driver.Process(cluster,
# Scenario (Python 2 script): one "database" server and one "access" server
# in the same cluster; the workload run continues past this chunk.
import http_admin, driver, workload_runner, scenario_common
from vcoptparse import *

op = OptParser()
op["workload"] = PositionalArg()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    print "Starting cluster..."
    cluster = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    database_machine_files = driver.Files(metacluster, db_path="db-database", log_path="create-db-database-output", executable_path=executable_path, command_prefix=command_prefix)
    database_machine = driver.Process(cluster, database_machine_files, log_path="serve-output-database", executable_path=executable_path, command_prefix=command_prefix, extra_options=serve_options)
    access_machine_files = driver.Files(metacluster, db_path="db-access", log_path="create-db-access-output", executable_path=executable_path, command_prefix=command_prefix)
    # (call continues past this chunk)
    access_machine = driver.Process(cluster, access_machine_files,
# Copyright 2010-2012 RethinkDB, all rights reserved. import sys, os, time sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import http_admin, driver, workload_runner, scenario_common from vcoptparse import * op = OptParser() workload_runner.prepare_option_parser_for_split_or_continuous_workload(op) scenario_common.prepare_option_parser_mode_flags(op) opts = op.parse(sys.argv) with driver.Metacluster() as metacluster: print "Starting cluster..." cluster = driver.Cluster(metacluster) executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts) files1 = driver.Files(metacluster, db_path = "db-1", log_path = "create-output-1", executable_path = executable_path, command_prefix = command_prefix) process1 = driver.Process(cluster, files1, log_path = "serve-output-1", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options) files2 = driver.Files(metacluster, db_path = "db-2", log_path = "create-output-2", executable_path = executable_path, command_prefix = command_prefix) process2 = driver.Process(cluster, files2, log_path = "serve-output-2", executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options) process1.wait_until_started_up() process2.wait_until_started_up() print "Creating namespace..." http = http_admin.ClusterAccess([("localhost", p.http_port) for p in [process1, process2]]) dc1 = http.add_datacenter() http.move_server_to_datacenter(process1.files.machine_name, dc1) dc2 = http.add_datacenter()
def check(self):
    """Abort the test run if the background thread recorded an exception.

    NOTE(review): presumably a method of a thread-helper class defined
    before this chunk; self.err looks like an (exc_type, exc, tb) triple
    captured on another thread -- confirm against the class definition.
    """
    if self.err is not None:
        print("Exception from other thread:")
        traceback.print_exception(*self.err)
        sys.exit(1)

# Scenario body: two tagged servers ("a"/"b") in one cluster (the second
# Process call continues past this chunk).
with driver.Metacluster() as metacluster:
    cluster1 = driver.Cluster(metacluster)
    _, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print("Spinning up two processes...")
    files1 = driver.Files(metacluster, console_output="create-output-1", server_name="a", server_tags=["a_tag"], command_prefix=command_prefix)
    proc1 = driver.Process(cluster1, files1, console_output="serve-output-1", command_prefix=command_prefix, extra_options=serve_options)
    files2 = driver.Files(metacluster, console_output="create-output-2", server_name="b", server_tags=["b_tag"], command_prefix=command_prefix)
    # (call continues past this chunk)
    proc2 = driver.Process(cluster1, files2, console_output="serve-output-2",
# Test: start a server whose process inherits a 10 MB RLIMIT_FSIZE and then
# check the size of its log file.  Order matters: the limit is set before
# spawning (so the child inherits it) and lifted afterwards (so the parent
# is unaffected).
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, utils, scenario_common, vcoptparse

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
r = utils.import_python_driver()

with driver.Cluster(output_folder='.') as cluster:
    print("Creating process files (%.2fs)" % (time.time() - startTime))
    files = driver.Files(metacluster=cluster.metacluster, db_path="db", server_name="the_server", console_output="create-output", command_prefix=command_prefix)
    print("Setting resource limit (%.2fs)" % (time.time() - startTime))
    # Soft limit 10 MB, hard limit unlimited: child processes inherit this.
    size_limit = 10 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_FSIZE, (size_limit, resource.RLIM_INFINITY))
    print("Spinning up server process (which will inherit resource limit) (%.2fs)" % (time.time() - startTime))
    process = driver.Process(cluster, files, console_output="log", extra_options=serve_options)
    conn = r.connect(process.host, process.driver_port)
    server_uuid = r.db("rethinkdb").table("server_config").nth(0)["id"].run(conn)
    log_file_path = os.path.join(files.db_path, "log_file.txt")
    print("Un-setting resource limit (%.2fs)" % (time.time() - startTime))
    # Restore the parent's limit; the already-spawned child keeps its own.
    resource.setrlimit(resource.RLIMIT_FSIZE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
    print("log size: %d" % os.path.getsize(log_file_path))
# Scenario: two servers created at different times but sharing the server
# name "my_server_name" (a name-reuse test; continues past this chunk).
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

opts = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(opts)
parsed_opts = opts.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(parsed_opts)
r = utils.import_python_driver()

with driver.Metacluster() as metacluster:
    cluster = driver.Cluster(metacluster)
    print("Starting first server and creating a database (%.2fs)" % (time.time() - startTime))
    files1 = driver.Files(metacluster, "my_server_name", db_path="server1_data", command_prefix=command_prefix, console_output=True)
    server1a = driver.Process(cluster, files1, command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True, console_output=True)
    conn1a = r.connect(host=server1a.host, port=server1a.driver_port)
    res = r.db_create("my_database_name").run(conn1a)
    db1_uuid = res["config_changes"][0]["new_val"]["id"]
    # No cluster issues expected while only the first server is up.
    assert list(r.db("rethinkdb").table("current_issues").run(conn1a)) == []
    print("Stopping first server (%.2fs)" % (time.time() - startTime))
    server1a.check_and_stop()
    print("Starting second server and creating a database (%.2fs)" % (time.time() - startTime))
    files2 = driver.Files(metacluster, "my_server_name", db_path="server2_data", command_prefix=command_prefix, console_output=True)
    # (call continues past this chunk)
    server2a = driver.Process(cluster, files2,