Example #1
    def run(self, result=None):
        if self.tables:
            defaultDb, defaultTable = utils.get_test_db_table()

            if not self.dbName:
                self.dbName = defaultDb

            if isinstance(self.tables, (int, long)):
                if self.tables == 1:
                    self.tableNames = [defaultTable]
                else:
                    self.tableNames = ['%s_%d' % (defaultTable, i) for i in range(1, self.tables + 1)]
            elif isinstance(self.tables, (str, unicode)):
                self.tableNames = [self.tables]
            elif hasattr(self.tables, '__iter__'):
                self.tableNames = [str(x) for x in self.tables]
            else:
                raise Exception('The value of tables was not recognised: %r' % self.tables)

            self.tableName = self.tableNames[0]

        # Allow detecting test failure in tearDown
        self.__currentResult = result or self.defaultTestResult()
        self.__problemCount = 0 if result is None else len(self.__currentResult.errors) + len(self.__currentResult.failures)

        super(RdbTestCase, self).run(self.__currentResult)
Example #2
    def run(self, result=None):
        if self.tables:
            defaultDb, defaultTable = utils.get_test_db_table()

            if not self.dbName:
                self.dbName = defaultDb

            if isinstance(self.tables, (int, long)):
                if self.tables == 1:
                    self.tableNames = [defaultTable]
                else:
                    self.tableNames = [
                        '%s_%d' % (defaultTable, i)
                        for i in range(1, self.tables + 1)
                    ]
            elif isinstance(self.tables, (str, unicode)):
                self.tableNames = [self.tables]
            elif hasattr(self.tables, '__iter__'):
                self.tableNames = [str(x) for x in self.tables]
            else:
                raise Exception('The value of tables was not recognised: %r' %
                                self.tables)

            self.tableName = self.tableNames[0]

        # Allow detecting test failure in tearDown
        self.__currentResult = result or self.defaultTestResult()
        self.__problemCount = 0 if result is None else len(
            self.__currentResult.errors) + len(self.__currentResult.failures)

        super(RdbTestCase, self).run(self.__currentResult)
Example #3
    def run(self, result=None):

        if not all([self.dbName, self.tableName]):
            defaultDb, defaultTable = utils.get_test_db_table()

            if self.dbName is None:
                self.__class__.dbName = defaultDb
            if self.tableName is None:
                self.__class__.tableName = defaultTable

        self.__class__.table = self.r.db(self.dbName).table(self.tableName)

        # Allow detecting test failure in tearDown
        self.__currentResult = result or self.defaultTestResult()
        self.__problemCount = 0 if result is None else len(
            self.__currentResult.errors) + len(self.__currentResult.failures)

        super(RdbTestCase, self).run(self.__currentResult)
Example #4
opts['progress'] = vcoptparse.BoolFlag(
    '--progress',
    False)  # Write messages every 10 seconds with the time remaining
opts['threads'] = vcoptparse.IntFlag(
    '--threads',
    16)  # Number of client threads to run (not counting changefeeds)
opts['changefeeds'] = vcoptparse.BoolFlag(
    '--changefeeds', False)  # Whether or not to use changefeeds
opts['kill'] = vcoptparse.BoolFlag(
    '--kill', False
)  # Randomly kill and revive servers during fuzzing - will produce a lot of noise
parsed_opts = opts.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(
    parsed_opts)

r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

server_names = list(string.ascii_lowercase[:parsed_opts['servers']])

system_tables = [
    'table_config', 'server_config', 'db_config', 'cluster_config',
    'table_status', 'server_status', 'current_issues', 'jobs', 'stats', 'logs',
    '_debug_table_status'
]

# Global data used by query generators, and a lock to make it thread-safe
data_lock = threading.Lock()
dbs = set()
tables = set()
indexes = set()
Example #5
import os, sys, time

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse, workload_runner

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
op["workload1"] = vcoptparse.StringFlag("--workload-before", None)
op["workload2"] = vcoptparse.StringFlag("--workload-after", None)
op["timeout"] = vcoptparse.IntFlag("--timeout", 600)
opts = op.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)

r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

utils.print_with_time("Starting cluster with one server")
with driver.Cluster(initial_servers=['first'], output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as cluster:
    
    server1 = cluster[0]
    workload_ports1 = workload_runner.RDBPorts(host=server1.host, http_port=server1.http_port, rdb_port=server1.driver_port, db_name=dbName, table_name=tableName)
    
    utils.print_with_time("Establishing ReQL connection")
    
    conn1 = r.connect(server1.host, server1.driver_port)
    
    utils.print_with_time("Creating db/table %s/%s" % (dbName, tableName))
    
    if dbName not in r.db_list().run(conn1):
        r.db_create(dbName).run(conn1)
Example #6
import threading, os, sys, time

sys.path.append(
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

r = utils.import_python_driver()

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(
    op.parse(sys.argv))

db, _ = utils.get_test_db_table()
query_server = None


def create_tables(conn):
    r.db(db).table_create('single').run(conn)
    r.db(db).table_create('majority').run(conn)
    r.db(db).reconfigure(replicas={
        'primary': 1,
        'replica': 2,
        'nonvoting': 3
    },
                         primary_replica_tag='primary',
                         nonvoting_replica_tags=['nonvoting'],
                         shards=1).run(conn)
    r.db(db).table('single').config().update({
Example #7
sys.path.append(
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, server_options = scenario_common.parse_mode_flags(
    op.parse(sys.argv))

r = utils.import_python_driver()

# == settings

docs_per_table = 20
dbName, _ = utils.get_test_db_table()

# == helpers


def make_table(name, shards, conn):
    """Create a table named "name" with the given shard configuration, and populateit with some data."""
    utils.print_with_time("Preparing table '%s'" % name)
    res = r.db("rethinkdb").table("table_config").insert({
        "name": name,
        "db": dbName,
        "shards": shards
    }).run(conn)
    assert res.get("inserted") == 1, res
    res = r.table(name).wait(wait_for="all_replicas_ready").run(conn)
    assert res.get("ready") == 1, res
Example #8
#!/usr/bin/env python
# Copyright 2010-2016 RethinkDB, all rights reserved.

# This file tests the `rethinkdb.stats` admin table.
# Here, we run very particular queries and verify that the 'total' stats are exactly
# correct.  This includes point reads/writes, range reads/replaces, backfills, and
# sindex construction.

import sys, os, time, re, multiprocessing, random, pprint

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse, workload_runner

r = utils.import_python_driver()

db_name, table_name = utils.get_test_db_table()
server_names = ['grey', 'face']

op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, server_options = scenario_common.parse_mode_flags(op.parse(sys.argv))

with driver.Cluster(initial_servers=server_names, output_folder='.', command_prefix=command_prefix, extra_options=server_options) as cluster:
    
    r.connect(host=cluster[0].host, port=cluster[0].driver_port).repl()

    r.db_create(db_name).run()
    r.db(db_name).table_create(table_name).run()
    tbl = r.db(db_name).table(table_name)
    table_id = tbl.config()['id'].run()
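
The examples above share one setup idiom: fetch the shared test names from utils.get_test_db_table(), connect with the driver returned by utils.import_python_driver(), and create the database and table only if they are missing. Below is a minimal sketch of that idiom, not a line-for-line excerpt from any example: the `server` object stands in for a cluster member such as cluster[0] in Example #5, and the table-existence guard is the assumed counterpart of the db check shown there.

# Minimal setup sketch, assuming `server` is a driver.Cluster member (as in Example #5).
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

conn = r.connect(server.host, server.driver_port)

# Create the shared db/table only if they do not already exist; Example #5 shows the
# db check, and the table check here is the analogous, assumed step.
if dbName not in r.db_list().run(conn):
    r.db_create(dbName).run(conn)
if tableName not in r.db(dbName).table_list().run(conn):
    r.db(dbName).table_create(tableName).run(conn)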