def testCommandOverrideHset(env):
    """Hook HSET with a CommandReader gear and verify a '__time' field is injected."""
    conn = getConnectionByEnv(env)
    script = '''
import time

def doHset(x):
    x += ['__time', time.time()]
    return call_next(*x[1:])

GB("CommandReader").map(doHset).register(hook="hset", mode="sync")
'''
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    # Write three hashes through the hooked HSET.
    for key in ('h1', 'h2', 'h3'):
        conn.execute_command('hset', key, 'foo', 'bar')

    # Every hash must have picked up the injected '__time' field.
    for key in ('h1', 'h2', 'h3'):
        res = conn.execute_command('hget', key, '__time')
        env.assertNotEqual(res, None)
def testKeysReaderWithCommandsUnregister(self):
    """Register a keymiss hook on 'test*', unregister it, then re-register it."""
    script = '''
async def OverrideReply(x):
    res = await GB('ShardsIDReader').run()
    override_reply(len(res))

GB().foreach(OverrideReply).register(prefix='test*', eventTypes=['keymiss'], commands=['get', 'mget'], mode='async')
'''
    self.env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(self.env)

    # 'x' does not match the 'test*' prefix, so the hook must not fire.
    self.env.assertEqual(self.conn.execute_command('get', 'x'), None)
    # 'test1' matches: the hook overrides the reply (expected value 2).
    self.env.assertEqual(self.conn.execute_command('get', 'test1'), 2)

    # Drop every registration; the miss hook must stop firing.
    for r in self.env.cmd('RG.DUMPREGISTRATIONS'):
        self.env.expect('RG.UNREGISTER', r[1]).equal('OK')
    self.env.assertEqual(self.conn.execute_command('get', 'test1'), None)

    # Re-registering restores the overridden reply.
    self.env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(self.env)
    self.env.assertEqual(self.conn.execute_command('get', 'test1'), 2)
def testBasicStream(env):
    """Register a keyspace gear that pushes every written value into 'values'."""
    conn = getConnectionByEnv(env)
    res = env.cmd('rg.pyexecute', "GearsBuilder()."
                                  "filter(lambda x:x['key'] != 'values' and x['type'] != 'empty')."
                                  "repartition(lambda x: 'values')."
                                  "foreach(lambda x: redisgears.executeCommand('lpush', 'values', x['value']))."
                                  "register('*')", 'UNBLOCKING')
    env.assertEqual(res, 'OK')
    if res != 'OK':
        return
    verifyRegistrationIntegrity(env)

    conn.execute_command('set', 'x', '1')
    conn.execute_command('set', 'y', '2')
    conn.execute_command('set', 'z', '3')

    # Wait until all executions show up, then drain them.
    res = []
    while len(res) < 6:
        res = env.cmd('rg.dumpexecutions')
    for e in res:
        env.broadcast('rg.getresultsblocking', e[1])
        env.cmd('rg.dropexecution', e[1])

    env.assertEqual(set(conn.lrange('values', '0', '-1')), set(['1', '2', '3']))

    # delete all registrations so valgrind check will pass
    for r in env.cmd('RG.DUMPREGISTRATIONS'):
        env.expect('RG.UNREGISTER', r[1]).equal('OK')
def testStreamReaderAsync(env):
    """Stream records block on a gearsFuture until an 'unblock' trigger releases them."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    script = '''
fdata = []

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    f = BlockHolder(gearsFuture())
    fd = (f, r)
    fdata.insert(0, fd)
    return f.bc

GB('CommandReader').map(lambda a: fdata.pop()).foreach(lambda x: x[0].continueRun(x[1])).register(trigger='unblock')
GB('StreamReader').map(bc).foreach(lambda x: execute('set', x['value']['key'], x['value']['val'])).register(mode='sync', prefix='s')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    env.cmd('xadd', 's', '*', 'key', 'x', 'val', '1')
    env.cmd('xadd', 's', '*', 'key', 'y', 'val', '2')
    env.cmd('xadd', 's', '*', 'key', 'z', 'val', '3')

    try:
        with TimeLimit(50):
            # Release the blocked stream records one by one and wait for
            # each corresponding SET to land.
            for key, expected in (('x', '1'), ('y', '2'), ('z', '3')):
                env.cmd('RG.TRIGGER', 'unblock')
                val = None
                while val != expected:
                    val = env.cmd('get', key)
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testSimpleAsyncOnFilter(env):
    """An async filter step blocks until an 'unblock' trigger supplies its verdict."""
    conn = getConnectionByEnv(env)
    script = '''
fdata = []

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    f = BlockHolder(gearsFuture())
    fdata.insert(0, f)
    return f.bc

def unbc(r):
    global fdata
    try:
        f = fdata.pop()
    except Exception as e:
        return 0
    if f:
        f.continueRun(True if r[1] == 'true' else False)
        return 1
    return 0

GB('CommandReader').flatmap(lambda x: execute('keys', '*')).collect().filter(bc).count().register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    conn.execute_command('set', 'x', '1')
    conn.execute_command('set', 'y', '2')
    conn.execute_command('set', 'z', '3')

    def Block():
        env.expect('RG.TRIGGER', 'block').equal(['3'])

    try:
        with Background(Block) as bk:
            with TimeLimit(50):
                # Keep unblocking pending filter futures until the blocked
                # execution completes.
                while bk.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock', 'true')
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testSetFutureErrorOnAggregateByResultsBeforeReturnIt(env):
    """continueFailed on a groupby future propagates 'Failed' to the caller."""
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

    def continueFailed(self, r):
        if self.bc:
            self.bc.continueFailed(r)

blocked = []

def WaitForKeyChangeReturnSame(r, *args):
    f = gearsFuture()
    blocked.append(BlockHolder(f))
    return f

GB('CommandReader').groupby(lambda x: 'key', WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeAccumulateby', mode='async_local')

async def ForEachFailed(r):
    def unblock(x):
        global blocked
        try:
            [a.continueFailed('Failed') for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    await GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEachFailed).register('y', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def WaitForKeyAccumulateby():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulateby').equal('Failed')

    try:
        with Background(WaitForKeyAccumulateby) as bk:
            with TimeLimit(50):
                # Each write to 'y' fails the pending futures via continueFailed.
                while bk.isAlive:
                    conn.execute_command('set', 'y', '1')
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(
            False, message='Failed waiting for WaitForKeyChange to reach unblock')
def testAsyncWithRepartition2(env):
    """Futures created after a repartition step are released by 'unblock' triggers."""
    conn = getConnectionByEnv(env)
    script = '''
fdata = []

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    f = BlockHolder(gearsFuture())
    fdata.append((f, r))
    return f.bc

def unbc(r):
    num = 0
    while len(fdata) > 0:
        a = fdata.pop()
        a[0].continueRun(a[1])
        num += 1
    return num

GB('CommandReader').flatmap(lambda x: execute('keys', '*')).map(lambda x: execute('get', x)).repartition(lambda x: x).map(bc).count().register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    for i in range(10000):
        conn.execute_command('set', i, i)

    def Block():
        env.expect('RG.TRIGGER', 'block').equal(['10000'])

    try:
        # Two concurrent blocked executions; keep unblocking until both finish.
        with Background(Block) as bk1:
            with Background(Block) as bk2:
                with TimeLimit(50):
                    while bk1.isAlive or bk2.isAlive:
                        conn.execute_command('RG.TRIGGER', 'unblock')
                        time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testKeysReaderWithCommandsOptionOnAsynExecution(self):
    """An async keymiss hook can override the reply of GET."""
    script = '''
GB().foreach(lambda x: override_reply('key does not exists')).register(eventTypes=['keymiss'], commands=['get', 'mget'], mode='async')
'''
    self.env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(self.env)

    # 'x' is missing, so the hook fires and rewrites the reply.
    res = self.conn.execute_command('get', 'x')
    self.env.assertEqual(res, 'key does not exists')
def testCreateAsyncRecordMoreThenOnceRaiseError(env):
    """Creating two gearsFutures inside one step must raise an error."""
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

blocked = []

def WaitForKeyChangeReturnSame(r, *args):
    f1 = gearsFuture()
    blocked.append(BlockHolder(f1))
    f2 = gearsFuture()
    blocked.append(BlockHolder(f2))
    return r

GB('CommandReader').map(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeMap', mode='async_local')

def ForEach(r):
    def unblock(x):
        global blocked
        try:
            [a.continueRun(x['key']) for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEach).register(mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def WaitForKeyMap():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeMap').error().contains(
            'Can not create async record twice on the same step')

    try:
        with Background(WaitForKeyMap) as bk:
            with TimeLimit(50):
                while bk.isAlive:
                    conn.execute_command('set', 'x', '1')
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(
            False, message='Failed waiting for WaitForKeyChange to reach unblock')
def testSimpleAsyncOnSyncExecution(env):
    """An async_local execution can block on a gearsFuture released from another trigger."""
    conn = getConnectionByEnv(env)
    script = '''
fdata = []

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    f = BlockHolder(gearsFuture())
    fdata.insert(0, (f, r))
    return f.bc

def unbc_internal(r):
    global fdata
    [a[0].continueRun(a[1]) for a in fdata]
    return r

def unbc(r):
    f = BlockHolder(gearsFuture())
    GB('ShardsIDReader').foreach(unbc_internal).count().foreach(lambda r: f.continueRun(r)).run()
    return f.bc

GB('CommandReader').map(bc).register(trigger='block', mode='async_local')
GB('CommandReader').map(unbc).register(trigger='unblock', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def Block():
        env.expect('RG.TRIGGER', 'block', 'arg').equal(["['block', 'arg']"])

    try:
        with Background(Block) as bk:
            with TimeLimit(50):
                while bk.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock')
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testKeysReaderWithCommandsOptionOnSynExecutionOnMultiExec(self):
    """A sync keymiss hook overrides the reply even inside MULTI/EXEC."""
    self.env.skipOnCluster()
    script = '''
GB().foreach(lambda x: override_reply('key does not exists')).register(eventTypes=['keymiss'], commands=['get', 'mget'], mode='sync')
'''
    self.env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(self.env)

    # Queue the GET inside a transaction; EXEC must return the overridden reply.
    self.env.cmd('multi')
    self.env.cmd('get', 'x')
    self.env.expect('exec').equal(['key does not exists'])
def testSimpleAsyncOnAggregate(env):
    """An accumulate step may return a future whose value is supplied by 'unblock'."""
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

fdata = None

def unbc(r):
    global fdata
    if fdata:
        fdata[2].continueRun(fdata[0] + fdata[1] * int(r[1]))
        fdata = None
        return 1
    return 0

def doAggregate(a, r):
    global fdata
    fdata = (a if a else 0, r if r else 1, BlockHolder(gearsFuture()))
    return fdata[2].bc

GB('CommandReader').flatmap(lambda x: [int(a) for a in x[1:]]).accumulate(doAggregate).collect().accumulate(doAggregate).register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def Block():
        env.expect('RG.TRIGGER', 'block', '1', '2', '4').equal([str(28 * env.shardsCount)])

    try:
        with Background(Block) as bk:
            with TimeLimit(50):
                # Each unblock resolves the pending accumulate future with factor 2.
                while bk.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock', '2')
                    time.sleep(0.1)
    except Exception as e:
        print(e)
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testSimpleAsyncOnForeach(env):
    """A foreach step may return a future; its resolution feeds the rest of the pipe."""
    conn = getConnectionByEnv(env)
    script = '''
fdata = None

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    fdata = BlockHolder(gearsFuture())
    return fdata.bc

def unbc(r):
    global fdata
    if fdata:
        fdata.continueRun(r)
        fdata = None
        return 1
    return 0

GB('CommandReader').foreach(bc).flatmap(lambda x: x).distinct().count().register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def Block():
        env.expect('RG.TRIGGER', 'block', '1').equal(['2'])

    try:
        with Background(Block) as bk:
            with TimeLimit(50):
                while bk.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock', '1', '2', '3', '4', '5')
                    time.sleep(0.1)
    except Exception as e:
        print(e)
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testKeysReaderWithCommandsOptionOnAsyncAwait(self):
    """A keymiss hook coroutine may await another execution before replying."""
    script = '''
async def OverrideReply(x):
    res = await GB('ShardsIDReader').run()
    override_reply(len(res))

GB().foreach(OverrideReply).register(eventTypes=['keymiss'], commands=['get', 'mget'], mode='async')
'''
    self.env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(self.env)

    # Missing key triggers the hook; reply is overridden (expected value 2).
    res = self.conn.execute_command('get', 'x')
    self.env.assertEqual(res, 2)
def testMultipleRegistrationsSameBuilderWithKeysReader(env):
    """A single KeysReader builder registered on two prefixes fires for both."""
    script = '''
gb = GB().foreach(lambda x: execute('del', x['key']))
gb.register(prefix='foo', readValue=False, mode='sync')
gb.register(prefix='bar', readValue=False, mode='sync')
'''
    conn = getConnectionByEnv(env)
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    # Both keys are deleted by their respective registrations.
    conn.execute_command('set', 'foo', '1')
    conn.execute_command('set', 'bar', '2')
    env.assertEqual(conn.execute_command('get', 'foo'), None)
    env.assertEqual(conn.execute_command('get', 'bar'), None)
def testMultipleRegistrationsSameBuilderWithStreamReader(env):
    """A single StreamReader builder registered on two prefixes fires for both."""
    script = '''
gb = GB('StreamReader').foreach(lambda x: execute('hset', '{%s}_hash' % x['key'], *sum([[k,v] for k,v in x['value'].items()], [])))
gb.register(prefix='foo', mode='sync')
gb.register(prefix='bar', mode='sync')
'''
    conn = getConnectionByEnv(env)
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    # Each stream entry is mirrored into a hash keyed by the stream name.
    conn.execute_command('xadd', 'foo', '*', 'x', '1')
    conn.execute_command('xadd', 'bar', '*', 'x', '1')
    env.assertEqual(conn.execute_command('hgetall', '{foo}_hash'), {'x': '1'})
    env.assertEqual(conn.execute_command('hgetall', '{bar}_hash'), {'x': '1'})
def testCreateAsyncWithoutFree(env):
    """Returning a gearsFuture that is never continued must produce an error."""
    conn = getConnectionByEnv(env)
    script = '''
def WaitForKeyChangeReturnSame(r, *args):
    f1 = gearsFuture()
    return f1

GB('CommandReader').map(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeMap', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)
    env.expect('RG.TRIGGER', 'WaitForKeyChangeMap').error().contains(
        'Async record did not called continue')
def testSetFutureResultsBeforeReturnIt(env):
    """Resolving a gearsFuture before returning it from the callback is an error."""
    conn = getConnectionByEnv(env)
    script = '''
def test(r, *args):
    f1 = gearsFuture()
    f1.continueRun(r)
    return f1

GB('CommandReader').map(test).register(trigger='test', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)
    env.expect('RG.TRIGGER', 'test').error().contains(
        'Can not handle future untill it returned from the callback')
def testMultipleRegistrationsSameBuilderWithCommandReader(env):
    """A single CommandReader builder hooked on both HSET and HMSET."""
    script = '''
import time
gb = GB('CommandReader').foreach(lambda x: call_next(x[1], '_time', time.time(), *x[2:]))
gb.register(hook='hset', mode='sync')
gb.register(hook='hmset', mode='sync')
'''
    conn = getConnectionByEnv(env)
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    # Both hooked commands must inject the '_time' field.
    conn.execute_command('hset', 'h1', 'x', '1')
    conn.execute_command('hmset', 'h2', 'x', '1')
    env.assertNotEqual(conn.execute_command('hget', 'h1', '_time'), None)
    env.assertNotEqual(conn.execute_command('hget', 'h2', '_time'), None)
def testAwaitOnAnotherExcecution(env):
    """A CommandReader coroutine awaits a KeysReader run and returns its keys sorted."""
    script = '''
async def doTest(x):
    res = await GB().map(lambda x: x['key']).run()
    if len(res[1]) > 0:
        raise Exception(res[1][0])
    return res[0]

GB("CommandReader").map(doTest).flatmap(lambda x: x).sort().register(trigger="test", mode="async_local")
'''
    conn = getConnectionByEnv(env)
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    conn.execute_command('set', 'x', '1')
    conn.execute_command('set', 'y', '2')
    conn.execute_command('set', 'z', '3')
    env.expect('rg.trigger', 'test').equal(['x', 'y', 'z'])
def testDeleteStreamDurringRun(env):
    """Flushing the DB while a stream execution is mid-run must not crash."""
    env.skipOnCluster()
    script = '''
import time
GB("StreamReader").foreach(lambda x: time.sleep(2)).register(prefix='s')
'''
    env.expect('rg.pyexecute', script).ok()
    verifyRegistrationIntegrity(env)

    env.cmd('xadd', 's', '*', 'foo', 'bar')
    time.sleep(1)
    env.cmd('flushall')

    # make sure all executions are done without crashing
    # NOTE(review): indices 7/3 vs 7/5 appear to compare triggered vs finished
    # counters in RG.DUMPREGISTRATIONS output — confirm against the reply layout.
    registration = env.cmd('RG.DUMPREGISTRATIONS')[0]
    while registration[7][3] != registration[7][5]:
        time.sleep(0.1)
        registration = env.cmd('RG.DUMPREGISTRATIONS')[0]
def testSimpleAsyncOnAggregateBy(env):
    """A groupby accumulator may return a future resolved by 'unblock' triggers."""
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

fdata = None

def unbc(r):
    global fdata
    if fdata:
        fdata[1].continueRun(fdata[0] + 1)
        fdata = None
        return 1
    return 0

def doGroupBy(k, a, r):
    global fdata
    fdata = (a if a else 0, BlockHolder(gearsFuture()))
    return fdata[1].bc

def toDict(a, r):
    if a == None:
        a = {}
    currVal = a.get(r['key'], 0)
    a[r['key']] = currVal + r['value']
    return a

GB('CommandReader').flatmap(lambda x: [int(a) for a in x[1:]]).groupby(lambda x: x, doGroupBy).collect().accumulate(toDict).register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def Block():
        res = env.cmd('RG.TRIGGER', 'block', '1', '1', '1', '2', '2', '3', '3', '3', '3')
        d = eval(res[0])
        # Each shard counts its own occurrences, so totals scale with shardsCount.
        env.assertEqual(d['3'], 4 * env.shardsCount)
        env.assertEqual(d['1'], 3 * env.shardsCount)
        env.assertEqual(d['2'], 2 * env.shardsCount)

    try:
        with Background(Block) as bk:
            with TimeLimit(50):
                while bk.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock')
                    time.sleep(0.1)
    except Exception as e:
        print(e)
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testSimpleAsyncWithNoneAsyncResult(env):
    """Steps that create a gearsFuture but return a plain value (or raise) behave
    like synchronous steps, for every step type, whether the leaked futures are
    later continued or failed."""
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

    def continueFailed(self, r):
        if self.bc:
            self.bc.continueFailed(r)

blocked = []

def WaitForKeyChangeReturnSame(r, *args):
    f = gearsFuture()
    blocked.append(BlockHolder(f))
    return r

GB('CommandReader').map(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeMap', mode='async_local')
GB('CommandReader').flatmap(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeFlatmap', mode='async_local')
GB('CommandReader').accumulate(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeAccumulate', mode='async_local')
GB('CommandReader').groupby(lambda x: 'key', WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeAccumulateby', mode='async_local')

def WaitForKeyChangeRaisError(r, *args):
    f = gearsFuture()
    blocked.append(BlockHolder(f))
    raise Exception('test')

GB('CommandReader').foreach(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeForeach', mode='async_local')
GB('CommandReader').filter(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeFilter', mode='async_local')
GB('CommandReader').map(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeMapError', mode='async_local')
GB('CommandReader').flatmap(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeFlatmapError', mode='async_local')
GB('CommandReader').accumulate(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeAccumulateError', mode='async_local')
GB('CommandReader').groupby(lambda x: 'key', WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeAccumulatebyError', mode='async_local')

def ForEach(r):
    def unblock(x):
        global blocked
        try:
            [a.continueRun(x['key']) for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEach).register('x', mode='async_local')

def ForEachFailed(r):
    def unblock(x):
        global blocked
        try:
            [a.continueFailed(x['key']) for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEachFailed).register('y', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def WaitForKeyMap():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeMap').equal(["['WaitForKeyChangeMap']"])

    def WaitForKeyChangeForeach():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeForeach').error().contains('Exception: test')

    def WaitForKeyChangeFilter():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeFilter').error().contains('Exception: test')

    def WaitForKeyFlatmap():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeFlatmap').equal(
            ['WaitForKeyChangeFlatmap'])

    def WaitForKeyAccumulate():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulate').equal(['None'])

    def WaitForKeyAccumulateby():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulateby').equal(
            ["{'key': 'key', 'value': None}"])

    def WaitForKeyMapError():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeMapError').error().contains('Exception: test')

    def WaitForKeyFlatmapError():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeFlatmapError').error().contains('Exception: test')

    def WaitForKeyAccumulateError():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulateError').equal(['None'])

    def WaitForKeyAccumulatebyError():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulatebyError').equal(
            ["{'key': 'key', 'value': None}"])

    tests = [
        WaitForKeyMap,
        WaitForKeyChangeForeach,
        WaitForKeyChangeFilter,
        WaitForKeyFlatmap,
        WaitForKeyAccumulate,
        WaitForKeyAccumulateby,
        WaitForKeyMapError,
        WaitForKeyFlatmapError,
        WaitForKeyAccumulateError,
        WaitForKeyAccumulatebyError
    ]

    # First pass: writes to 'x' release the leaked futures via continueRun.
    for f in tests:
        try:
            with Background(f) as bk:
                with TimeLimit(50):
                    while bk.isAlive:
                        conn.execute_command('set', 'x', '1')
                        time.sleep(1)
        except Exception as e:
            env.assertTrue(
                False, message='Failed waiting for WaitForKeyChange to reach unblock')

    # Second pass: writes to 'y' release the leaked futures via continueFailed.
    for f in tests:
        try:
            with Background(f) as bk:
                with TimeLimit(50):
                    while bk.isAlive:
                        conn.execute_command('set', 'y', '1')
                        time.sleep(1)
        except Exception as e:
            env.assertTrue(
                False, message='Failed waiting for WaitForKeyChange to reach unblock')
def testKeysReaderAsync(env):
    """Each 'set' event blocks on a future; the next event releases the previous
    one, whose key is then deleted."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    script = '''
fdata = None

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    if fdata:
        fdata[0].continueRun(fdata[1])
    f = BlockHolder(gearsFuture())
    fdata = (f, r['key'])
    return f.bc

GB().map(bc).foreach(lambda x: execute('del', x)).register(mode='async_local', readValue=False, eventTypes=['set'])
'''
    env.expect('RG.PYEXECUTE', script).ok()
    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def SetX():
        env.expect('set', 'x', '1')

    def SetY():
        env.expect('set', 'y', '1')

    try:
        with TimeLimit(50):
            # Set 'x'; its execution blocks, but the value must be visible.
            with Background(SetX) as setx:
                x = None
                while x != '1':
                    x = conn.execute_command('GET', 'x')
                    time.sleep(0.1)
            # Set 'y'; this releases the 'x' future.
            with Background(SetY) as sety:
                y = None
                while y != '1':
                    y = conn.execute_command('GET', 'y')
                    time.sleep(0.1)
            # The released execution deletes 'x'.
            x = conn.execute_command('GET', 'x')
            while x:
                x = conn.execute_command('GET', 'x')
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')

    # Abort the still-blocked execution and clean up so later checks pass.
    for r in env.cmd('RG.DUMPEXECUTIONS'):
        env.expect('RG.ABORTEXECUTION', r[1]).equal('OK')
        env.expect('RG.DROPEXECUTION', r[1]).equal('OK')

    for r in env.cmd('RG.DUMPREGISTRATIONS'):
        env.expect('RG.UNREGISTER', r[1]).equal('OK')