Code Example #1
File: cluster.test.py  Project: Frankie-666/tarantool
print 'Sync master with replica'
print '-------------------------------------------------------------'

# Sync master with replica
replication_source = yaml.load(replica.admin('box.cfg.listen', silent=True))[0]
sys.stdout.push_filter(replication_source, '<replication_source>')
master.admin("box.cfg{ replication_source = '%s' }" % replication_source)

master.wait_lsn(replica_id, replica.get_lsn(replica_id))
master.admin('box.info.vclock[%d] == 1' % replica_id)
master.admin('box.info.vclock[%d] == 0' % replica_id2)
master.admin('box.info.vclock[%d] == nil' % replica_id3)

master.admin("box.cfg{ replication_source = '' }")
replica.stop()
replica.cleanup(True)

print '-------------------------------------------------------------'
print 'Start a new replica and check that server_id and LSN are re-used'
print '-------------------------------------------------------------'

#
# gh-1219: Proper removal of servers with non-zero LSN from _cluster
#
# Snapshot is required. Otherwise the relay will skip records made by the
# previous replica with the re-used id.
master.admin("box.snapshot()")
master.admin('box.info.vclock[%d] == 1' % replica_id)

replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
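
The master.wait_lsn(replica_id, ...) call above blocks until the master's vclock component for the replica catches up. A minimal sketch of such a polling helper, assuming an admin() method that returns the console's YAML output (the helper name and signature here are illustrative, not the actual harness API):

import time
import yaml

def wait_lsn(server, server_id, target_lsn, timeout=10.0):
    # Poll box.info.vclock until the component for server_id reaches
    # target_lsn, or fail after the timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        vclock = yaml.safe_load(server.admin('box.info.vclock', silent=True))[0]
        if (vclock.get(server_id) or 0) >= target_lsn:
            return
        time.sleep(0.01)
    raise RuntimeError('timeout waiting for lsn %d from server %d'
                       % (target_lsn, server_id))
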
Code Example #2
replica.admin('box.info.vclock[%d] == 2' % replica_id)

print '-------------------------------------------------------------'
print 'Master must not crash when it receives orphan rows from a replica'
print '-------------------------------------------------------------'

replication_source = yaml.load(replica.admin('box.cfg.listen', silent=True))[0]
sys.stdout.push_filter(replication_source, '<replication>')
master.admin("box.cfg{ replication = '%s' }" % replication_source)

master.wait_lsn(replica_id, replica.get_lsn(replica_id))
master.admin('box.info.vclock[%d] == 2' % replica_id)

master.admin("box.cfg{ replication = '' }")
replica.stop()
replica.cleanup(True)

print '-------------------------------------------------------------'
print 'Start a new replica and check that server_id and LSN are re-used'
print '-------------------------------------------------------------'

#
# gh-1219: Proper removal of servers with non-zero LSN from _cluster
#
# Snapshot is required. Otherwise the relay will skip records made by the
# previous replica with the re-used id.
master.admin("box.snapshot()")
master.admin('box.info.vclock[%d] == 2' % replica_id)

replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
Code Example #3
File: readonly.test.py  Project: DarkDare/tarantool
replica_id = replica.get_param('server')['id']
replica.admin('box.info.server.id')
replica.admin('box.info.server.ro')
replica.admin('box.info.server.lsn')
replica.stop()

print '-------------------------------------------------------------'
print 'replica is read-only until it receives its own server_id in _cluster'
print '-------------------------------------------------------------'

# Remove the xlog retrieved by SUBSCRIBE
filename = str(0).zfill(20) + ".xlog"
wal = os.path.join(replica.vardir, replica.name, filename)
os.remove(wal)

# Start replica without master
server.stop()
replica.start()
replica.admin('box.cfg{replication_source = ""}')

# Check that the replica is in read-only mode
replica.admin('box.info.server.id')
replica.admin('box.info.server.ro')
replica.admin('box.info.server.lsn')
replica.admin('space = box.schema.space.create("ro")')
replica.admin('box.info.vclock[%d]' % replica_id)

replica.stop()
replica.cleanup(True)
server.deploy()
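
The space.create call above is expected to fail because the instance is read-only. A hedged sketch of turning that expectation into an explicit assertion, assuming admin() returns the error text as a string (the helper itself is hypothetical, not part of the harness):

def expect_readonly_error(server, stmt):
    # Run a write on the admin console and check that the server
    # rejects it because it is in read-only mode (ER_READONLY).
    out = server.admin(stmt, silent=True)
    assert 'read-only' in out.lower(), 'expected a read-only error, got %r' % out

# Example: expect_readonly_error(replica, 'box.schema.space.create("ro")')
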
Code Example #4
master.admin("space = box.schema.space.create('test', {id =  42})")
master.admin("index = space:create_index('primary', { type = 'tree'})")

master.admin("for k = 1, 9 do space:insert{k, k*k} end")

replica = TarantoolServer(server.ini)
replica.script = "replication-py/replica.lua"
replica.vardir = server.vardir
replica.rpl_master = master
replica.deploy()
replica.admin("box.space.test:select()")

replica.restart()
replica.admin("box.space.test:select()")
replica.stop()
replica.cleanup()

print("-------------------------------------------------------------")
print("replica test 2 (must be ok)")
print("-------------------------------------------------------------")

master.restart()
master.admin("for k = 10, 19 do box.space[42]:insert{k, k*k*k} end")
master.admin("for k = 20, 29 do box.space[42]:upsert({k}, {}) end")
lsn = master.get_lsn(master_id)

replica = TarantoolServer(server.ini)
replica.script = "replication-py/replica.lua"
replica.vardir = server.vardir
replica.rpl_master = master
replica.deploy()
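
The snippet stops right after deploy(); a complete test would typically use the recorded lsn to sync the replica before reading, along these lines (a sketch reusing the wait_lsn(server_id, lsn) signature seen in the cluster test above):

# Catch up to everything the master wrote, then verify the data.
replica.wait_lsn(master_id, lsn)
replica.admin("box.space.test:select()")
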
Code Example #5
File: cluster.test.py  Project: trollsid/tarantool
replica.admin('box.info.vclock[%d] == 2' % replica_id)

print '-------------------------------------------------------------'
print 'Master must not crash when it receives orphan rows from a replica'
print '-------------------------------------------------------------'

replication_source = yaml.safe_load(replica.admin('box.cfg.listen', silent = True))[0]
sys.stdout.push_filter(replication_source, '<replication>')
master.admin("box.cfg{ replication = '%s' }" % replication_source)

master.wait_lsn(replica_id, replica.get_lsn(replica_id))
master.admin('box.info.vclock[%d] == 2' % replica_id)

master.admin("box.cfg{ replication = '' }")
replica.stop()
replica.cleanup()

print '-------------------------------------------------------------'
print 'Start a new replica and check that server_id and LSN are re-used'
print '-------------------------------------------------------------'

#
# gh-1219: Proper removal of servers with non-zero LSN from _cluster
#
# Snapshot is required. Otherwise the relay will skip records made by the
# previous replica with the re-used id.
master.admin("box.snapshot()")
master.admin('box.info.vclock[%d] == 2' % replica_id)

replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
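
sys.stdout.push_filter above masks the nondeterministic listen URI so the recorded output stays stable across runs. A minimal sketch of what such a filtering stdout wrapper could look like (illustrative only; the real test-run internals may differ):

class FilteredStream(object):
    # Wrap a stream and rewrite known nondeterministic substrings
    # (ports, socket paths) into fixed placeholders before writing.
    def __init__(self, stream):
        self.stream = stream
        self.filters = []

    def push_filter(self, pattern, replacement):
        self.filters.append((pattern, replacement))

    def write(self, text):
        for pattern, replacement in self.filters:
            text = text.replace(pattern, replacement)
        self.stream.write(text)
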
Code Example #6
# Wait until the hot_standby server has bound the master's ports
time.sleep(0.2)

print """
# Insert 10 tuples to hot_standby
"""
for i in range(id, id + 10):
    hot_standby_sql("insert into t0 values (%d, 'the tuple %d')" % (i, i))

print """
# Select 10 tuples from hot_standby
"""
for i in range(id, id + 10):
    hot_standby_sql("select * from t0 where k0 = %d" % i)

print """
# Select 10 tuples from replica
"""
replica.wait_lsn(21)
for i in range(id, id + 10):
    replica_sql("select * from t0 where k0 = %d" % i)

# Cleanup.
hot_standby.stop()
hot_standby.cleanup(True)
replica.stop()
replica.cleanup(True)
server.deploy(self.suite_ini["config"])

# vim: syntax=python
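
The fixed time.sleep(0.2) at the top of this example is a race-prone way to wait for a server to start listening. A sketch of a port-polling alternative (host and port would come from the harness configuration; the helper name is illustrative):

import socket
import time

def wait_for_port(host, port, timeout=5.0):
    # Poll until something accepts connections on (host, port),
    # instead of hoping a fixed sleep was long enough.
    deadline = time.time() + timeout
    while time.time() < deadline:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if sock.connect_ex((host, port)) == 0:
                return
        finally:
            sock.close()
        time.sleep(0.05)
    raise RuntimeError('port %d did not open within %.1f s' % (port, timeout))
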
Code Example #7
File: parallel.py  Project: mingodad/tarantool
class Supervisor(object):
    def __init__(self, suite_path, args):
        self.args = args
        self.tests = []
        self.suite_path = suite_path
        self.ini = {
            'core': 'tarantool',
            'script': os.path.join(suite_path, 'parallel.lua'),
        }

        # read suite config
        config = ConfigParser.ConfigParser()
        config.read(os.path.join(suite_path, "suite.ini"))
        self.ini.update(dict(config.items("default")))
        self.ini.update(self.args.__dict__)
        self.jobs = int(self.ini.get('jobs', 1))
        self.count = int(self.ini.get('count', 0))

        for i in ["script"]:
            self.ini[i] = os.path.join(suite_path,
                                       self.ini[i]) if i in self.ini else None
        self.server = TarantoolServer(self.ini)
        self.pool = None
        self.iterator = None

    def find_tests(self):
        self.tests += [Parallel_PythonTest(k) \
                for k in sorted(glob.glob(os.path.join(self.suite_path, "*.test.py" )))]

    def take_rand(self):
        if self.count != 0:
            for test in self.tests:
                sql = self.server.sql.clone()
                admin = self.server.admin.clone()
                yield [test, [sql, admin]]
        else:
            while True:
                sql = self.server.sql.clone()
                admin = self.server.admin.clone()
                yield [random.choice(self.tests), [sql, admin]]

    def run_all(self):
        self.find_tests()
        if self.count != 0:
            self.tests *= self.count
            random.shuffle(self.tests)
        self.pool = GopherPool(processes=self.jobs)
        self.iterator = self.pool.run()
        self.filler = self.pool.fill(self.take_rand())
        try:
            self.server.cleanup()
            logger.info("Tarantool.Instance > Server cleaned up")
            logger.info("Tarantool.Instance > Server's path: %s",
                        self.server.binary)
            self.server.deploy()
            logger.info("Tarantool.Instance > Server deployed")
            try:
                while True:
                    self.filler.next()
                    logger.debug("BigBrother.run > Jobs filled %d %d" %
                                 (self.pool.queuein.qsize(),
                                  self.pool.queueout.qsize()))
                    while True:
                        try:
                            logger.debug("BigBrother.run > waiting for task")
                            task = self.iterator.next(1)
                            logger.debug("BigBrother.run > took task")
                            if task is None:
                                logger.info('>>>> Test returned None')
                                continue
                            stat = task.get_status()
                            if stat.status != 3:
                                logger.info('>>>> Test %s finished' %
                                            repr(task.name))
                            else:
                                logger.error(
                                    '>>>> Test %s failed with %s (%s)' % (repr(
                                        task.name), stat.message, stat.reject))
                        except (QueueEmpty, StopIteration):
                            break
            except StopIteration:
                pass
        finally:
            self.server.stop()
            logger.info("Tarantool.Instance > Server stopped")
Code Example #8
File: conflict.test.py  Project: tarantool/tarantool
parallel_run(
    "box.space.test:update(2, {{'=', 2, 1}})",
    "box.space.test:update(2, {{'=', 2, 2}})",
    [lambda x,y: x == 'follow' and y == 'follow',]
)

# test4: CRDT increment with update
prepare_cluster()
parallel_run(
    "box.space.test:update(1, {{'+', 2, 1}})",
    "box.space.test:update(1, {{'+', 2, 2}})",
    [lambda x,y: x == 'follow' and y == 'follow',]
)
check_replication([master, replica], '1')

# test5: delete a non-existing key
prepare_cluster()
parallel_run(
    "box.space.test:delete(999)",
    "box.space.test:delete(999)",
    [lambda x,y: x == 'follow' and y == 'follow',]
)
check_replication([master, replica])

# cleanup
replica.stop()
replica.cleanup()
server.stop()
server.cleanup()
server.deploy()
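
parallel_run and check_replication are helpers defined earlier in conflict.test.py and are not shown in this excerpt. Purely as a hypothetical reconstruction, check_replication plausibly asserts that replication is still alive on every server after the conflicting operations, along these lines (assuming admin() returns YAML; the real helper may differ):

import yaml

def check_replication(servers, key=None):
    # Hypothetical sketch: each server must still report its upstream
    # in 'follow' state; optionally, a probe key must be visible on
    # every server.
    for srv in servers:
        status = yaml.safe_load(srv.admin(
            "box.info.replication[1].upstream.status", silent=True))[0]
        assert status == 'follow'
        if key is not None:
            found = yaml.safe_load(srv.admin(
                "box.space.test:get{%s} ~= nil" % key, silent=True))[0]
            assert found is True
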