Example #1
0
    def test_workload(self):
        '''Run a workload while demoting the primary of shard 0 to a replica.'''
        primary = self.getPrimaryForShard(0)
        replica = self.getReplicaForShard(0)

        # Point the workload at the current primary for shard 0.
        ports = workload_runner.RDBPorts(
            host=primary.host,
            http_port=primary.http_port,
            rdb_port=primary.driver_port,
            db_name=self.dbName,
            table_name=self.tableName)
        with workload_runner.SplitOrContinuousWorkload(opts, ports) as workload:
            utils.print_with_time('Workloads:\n%s' % pprint.pformat(workload.opts))
            utils.print_with_time("Running before workload")
            workload.run_before()
            utils.print_with_time("Before workload complete")
            self.checkCluster()
            workload.check()

            utils.print_with_time("Demoting primary")
            # Rewrite the shard config so the replica takes over as primary,
            # then wait for all replicas to settle before re-checking.
            shardConfig = self.table.config()['shards'].run(self.conn)
            shardConfig[0]['primary_replica'] = replica.name
            self.table.config().update({'shards': shardConfig}).run(self.conn)
            self.table.wait(wait_for='all_replicas_ready').run(self.conn)
            self.checkCluster()

            utils.print_with_time("Running after workload")
            workload.run_after()
            self.checkCluster()
            utils.print_with_time("After workload complete")
Example #2
0
def get_workload_ports(processes, tableName, dbName='test'):
    """Build an RDBPorts descriptor for a randomly chosen server process."""
    for candidate in processes:
        assert isinstance(candidate, (driver.Process, driver.ProxyProcess))
    chosen = random.choice(processes)
    return workload_runner.RDBPorts(host=chosen.host,
                                    http_port=chosen.http_port,
                                    rdb_port=chosen.driver_port,
                                    table_name=tableName,
                                    db_name=dbName)
Example #3
0
def get_workload_ports(table, processes):
    """Return RDBPorts for *table* (db 'test') via a random local process."""
    for candidate in processes:
        assert isinstance(candidate, (driver.Process, driver.ProxyProcess))
    target = random.choice(processes)
    return workload_runner.RDBPorts(host="localhost",
                                    http_port=target.http_port,
                                    rdb_port=target.driver_port,
                                    table_name=table.name,
                                    db_name="test")
Example #4
0
def get_workload_ports(parsed_opts, namespace, processes):
    """Pick a random process and build the ports object for the configured protocol.

    Returns MemcachedPorts when the protocol is "memcached", RDBPorts otherwise.
    """
    for candidate in processes:
        assert isinstance(candidate, (driver.Process, driver.ProxyProcess))
    chosen = random.choice(processes)
    # The namespace and the parsed options must agree on the protocol.
    assert namespace.protocol == parsed_opts["protocol"]
    if parsed_opts["protocol"] == "memcached":
        return workload_runner.MemcachedPorts(
            host="localhost",
            http_port=chosen.http_port,
            memcached_port=namespace.port + chosen.port_offset)
    return workload_runner.RDBPorts(
        host="localhost",
        http_port=chosen.http_port,
        rdb_port=28015 + chosen.port_offset,
        table_name=namespace.name,
        db_name="test")
    def test_kill_secondary(self):
        '''Kill a secondary replica mid-workload and verify the reported issue.'''
        primary = self.getPrimaryForShard(0)
        secondary = self.getReplicaForShard(0)

        conn = self.r.connect(host=primary.host, port=primary.driver_port)

        # Sanity check: the cluster should start with no outstanding issues.
        issues = list(self.r.db('rethinkdb').table('current_issues').run(conn))
        self.assertEqual(issues, [], 'The issues list was not empty: %r' % issues)

        workload_ports = workload_runner.RDBPorts(host=primary.host, http_port=primary.http_port, rdb_port=primary.driver_port, db_name=self.dbName, table_name=self.tableName)
        with workload_runner.SplitOrContinuousWorkload(opts, workload_ports) as workload:

            print_with_time("Starting workload")
            workload.run_before()

            self.cluster.check()
            issues = list(self.r.db('rethinkdb').table('current_issues').run(conn))
            self.assertEqual(issues, [], 'The issues list was not empty: %r' % issues)

            print_with_time("Killing the secondary")
            secondary.kill()

            print_with_time("Checking that the table_availability issue shows up")
            # Issue propagation is asynchronous, so poll for up to 5 seconds,
            # retrying the assertions until they pass or the deadline expires.
            deadline = time.time() + 5
            last_error = None
            while time.time() < deadline:
                try:
                    issues = list(self.r.db('rethinkdb').table('current_issues').filter({'type':'table_availability', 'info':{'db':self.dbName, 'table':self.tableName}}).run(conn))
                    self.assertEqual(len(issues), 1, 'The server did not record the single issue for the killed secondary server:\n%s' % pformat(issues))

                    issue = issues[0]
                    # Losing one secondary is non-critical: the table must stay
                    # ready for both reads and writes.
                    self.assertEqual(issue['critical'], False)
                    self.assertEqual(issue['info']['status']['ready_for_reads'], True)
                    self.assertEqual(issue['info']['status']['ready_for_writes'], True)

                    break
                except Exception as e:
                    last_error = e
                    time.sleep(.2)
            else:
                # Deadline expired without a successful check; surface the most
                # recent failure. (The loop body runs at least once, so
                # last_error is always set on this path.)
                raise last_error

            print_with_time("Running after workload")
            workload.run_after()
        print_with_time("Done")
    r.db(dbName).table_create(tableName).run(conn)

    print("Pinning database to only the first server (%.2fs)" % (time.time() - startTime))
    # Collapse the table to a single shard replicated only on database_server,
    # so the access server holds no table data.
    assert r.db(dbName).table(tableName).config() \
        .update({'shards':
            [{'primary_replica':database_server.name, 'replicas':[database_server.name]}]
        }).run(conn)['errors'] == 0
    r.db(dbName).wait().run(conn)
    cluster.check()
    assert [] == list(r.db('rethinkdb').table('current_issues').run(conn))

    # Fix: the message previously read 'workoad'.
    sys.stderr.write('before workload: %s\n' % (repr(list(r.db(dbName).table(tableName).config().run(conn)))))

    print("Starting workload (%.2fs)" % (time.time() - startTime))

    workload_ports = workload_runner.RDBPorts(host=database_server.host, http_port=database_server.http_port, rdb_port=database_server.driver_port, db_name=dbName, table_name=tableName)
    with workload_runner.ContinuousWorkload(opts["workload"], workload_ports) as workload:
        workload.start()

        print("Running workload for 10 seconds (%.2fs)" % (time.time() - startTime))
        time.sleep(10)
        cluster.check()
        assert [] == list(r.db('rethinkdb').table('current_issues').run(conn))

        print("Killing the access server (%.2fs)" % (time.time() - startTime))
        access_server.kill()

        # The killed server was pinned out of the replica set above, so its
        # death should not raise any cluster issues.
        issues = list(r.db('rethinkdb').table('current_issues').run(conn))
        assert len(issues) == 0, 'Issues were raised when server stopped: %s' % repr(issues)

        # Don't bother stopping the workload, just exit and it will get killed
Example #7
0
    # Configure a single shard: `primary` as the primary replica plus the first
    # server from replicaPool as a secondary.
    assert r.db(dbName).table(tableName).config() \
        .update({'shards':[
            {'primary_replica':primary.name, 'replicas':[primary.name, replicaPool[0].name]}
        ]}).run(conn)['errors'] == 0

    # Wait for the new shard configuration to settle before starting work.
    r.db(dbName).wait().run(conn)
    cluster.check()
    issues = list(r.db('rethinkdb').table('current_issues').run(conn))
    assert len(
        issues) == 0, 'There were issues on the server: %s' % str(issues)

    print('Starting workload (%.2fs)' % (time.time() - startTime))

    # Point the workload at the primary server for this table.
    workload_ports = workload_runner.RDBPorts(host=primary.host,
                                              http_port=primary.http_port,
                                              rdb_port=primary.driver_port,
                                              db_name=dbName,
                                              table_name=tableName)
    with workload_runner.SplitOrContinuousWorkload(opts,
                                                   workload_ports) as workload:

        workload.run_before()

        cluster.check()
        assert list(r.db('rethinkdb').table('current_issues').run(conn)) == []
        workload.check()

        # Walk the configured sequence of steps, running the between-steps
        # workload before every step after the first.
        current = opts["sequence"].initial
        for i, s in enumerate(opts["sequence"].steps):
            if i != 0:
                workload.run_between()
Example #8
0
# Parse the common scenario flags into a command prefix and server options.
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)

r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

utils.print_with_time("Starting cluster with one server")
with driver.Cluster(initial_servers=['first'],
                    output_folder='.',
                    command_prefix=command_prefix,
                    extra_options=serve_options,
                    wait_until_ready=True) as cluster:

    server1 = cluster[0]
    # Workload endpoint descriptor pointing at the first (only) server.
    workload_ports1 = workload_runner.RDBPorts(host=server1.host,
                                               http_port=server1.http_port,
                                               rdb_port=server1.driver_port,
                                               db_name=dbName,
                                               table_name=tableName)

    utils.print_with_time("Establishing ReQL connection")

    conn1 = r.connect(server1.host, server1.driver_port)

    utils.print_with_time("Creating db/table %s/%s" % (dbName, tableName))

    if dbName not in r.db_list().run(conn1):
        r.db_create(dbName).run(conn1)

    # Recreate the table from scratch so the test starts with an empty table.
    if tableName in r.db(dbName).table_list().run(conn1):
        r.db(dbName).table_drop(tableName).run(conn1)
    r.db(dbName).table_create(tableName).run(conn1)
Example #9
0
    def test_failover(self):
        '''Run a workload while killing a server to cause a failover to a secondary'''

        # - setup

        primary = self.getPrimaryForShard(0)
        stable = self.getReplicaForShard(0)

        # Connect through the replica that survives, so queries keep working
        # after the primary is shut down.
        stableConn = self.r.connect(host=stable.host, port=stable.driver_port)

        workload_ports = workload_runner.RDBPorts(host=stable.host,
                                                  http_port=stable.http_port,
                                                  rdb_port=stable.driver_port,
                                                  db_name=dbName,
                                                  table_name=tableName)

        # - run test

        with workload_runner.SplitOrContinuousWorkload(
                opts, workload_ports) as workload:

            print_with_time("Starting workload before")
            workload.run_before()
            self.cluster.check()
            issues = list(
                self.r.db('rethinkdb').table('current_issues').run(stableConn))
            self.assertEqual(
                issues, [],
                'The server recorded the following issues after the run_before:\n%s'
                % pformat(issues))

            print_with_time("Shutting down the primary")
            primary.close()

            print_with_time(
                "Checking that the table_availability issue shows up")
            # Issue propagation is asynchronous: poll for up to 5 seconds,
            # retrying until the single expected issue appears.
            deadline = time.time() + 5
            last_error = None
            while time.time() < deadline:
                try:
                    issues = list(
                        self.r.db('rethinkdb').table('current_issues').filter({
                            'type':
                            'table_availability',
                            'info': {
                                'db': dbName,
                                'table': tableName
                            }
                        }).run(stableConn))
                    self.assertEqual(
                        len(issues), 1,
                        'The server did not record the single issue for the killed server:\n%s'
                        % pformat(issues))
                    break
                except Exception as e:
                    last_error = e
                    time.sleep(.2)
            else:
                # Deadline expired without a successful check; surface the
                # most recent failure. (The loop body runs at least once, so
                # last_error is always set on this path.)
                raise last_error

            print_with_time("Waiting for the table to become available again")
            timeout = 30
            try:
                self.table.wait(wait_for='ready_for_writes',
                                timeout=timeout).run(stableConn)
            except self.r.ReqlRuntimeError as e:
                # NOTE(review): `e` is caught but discarded; consider
                # `raise ... from e` to preserve the cause (Python 3 only).
                raise AssertionError(
                    'Table did not become available after %d seconds.' %
                    timeout)

            print_with_time("Running workload after")
            workload.run_after()

        print_with_time("Cleaning up")