def testSimpleAsyncOnAggregateBy(env):
    # Registers a 'block' trigger whose groupby step suspends on a gearsFuture,
    # and an 'unblock' trigger that resumes the suspended step. The test then
    # fires 'block' in the background and repeatedly fires 'unblock' until the
    # blocked aggregation completes.
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

fdata = None

def unbc(r):
    global fdata
    if fdata:
        fdata[1].continueRun(fdata[0] + 1)
        fdata = None
        return 1
    return 0

def doGroupBy(k, a, r):
    global fdata
    fdata = (a if a else 0, BlockHolder(gearsFuture()))
    return fdata[1].bc

def toDict(a, r):
    if a == None:
        a = {}
    currVal = a.get(r['key'], 0)
    a[r['key']] = currVal + r['value']
    return a

GB('CommandReader').flatmap(lambda x: [int(a) for a in x[1:]]).groupby(lambda x: x, doGroupBy).collect().accumulate(toDict).register(trigger='block')
GB('CommandReader').map(unbc).register(trigger='unblock')
'''
    env.expect('RG.PYEXECUTE', script).ok()

    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def Block():
        # Runs in a background thread: triggers the blocking aggregation and
        # verifies the per-value counts once it finally returns.
        reply = env.cmd('RG.TRIGGER', 'block', '1', '1', '1', '2', '2', '3', '3', '3', '3')
        counts = eval(reply[0])
        env.assertEqual(counts['3'], 4 * env.shardsCount)
        env.assertEqual(counts['1'], 3 * env.shardsCount)
        env.assertEqual(counts['2'], 2 * env.shardsCount)

    try:
        with Background(Block) as task:
            with TimeLimit(50):
                # Keep poking 'unblock' until the background trigger finishes.
                while task.isAlive:
                    conn.execute_command('RG.TRIGGER', 'unblock')
                    time.sleep(0.1)
    except Exception as e:
        print(e)
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testKeysReaderAsync(env):
    # KeysReader registration that blocks on every 'set' event via a
    # gearsFuture; each new event releases the previously blocked one,
    # whose continuation deletes the written key.
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    script = '''
fdata = None

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    if fdata:
        fdata[0].continueRun(fdata[1])
    f = BlockHolder(gearsFuture())
    fdata = (f, r['key'])
    return f.bc

GB().map(bc).foreach(lambda x: execute('del', x)).register(mode='async_local', readValue=False, eventTypes=['set'])
'''
    env.expect('RG.PYEXECUTE', script).ok()

    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def SetX():
        env.expect('set', 'x', '1')

    def SetY():
        env.expect('set', 'y', '1')

    try:
        with TimeLimit(50):
            # Write 'x' in the background; its event blocks inside the gears
            # registration, so the key stays visible until 'y' unblocks it.
            with Background(SetX) as setx:
                x = None
                while x != '1':
                    x = conn.execute_command('GET', 'x')
                    time.sleep(0.1)
            with Background(SetY) as sety:
                y = None
                while y != '1':
                    y = conn.execute_command('GET', 'y')
                    time.sleep(0.1)
            # Setting 'y' released the execution blocked on 'x', which then
            # deletes 'x'; wait for the deletion to land.
            x = conn.execute_command('GET', 'x')
            while x:
                x = conn.execute_command('GET', 'x')
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')

    # Clean up: abort/drop every execution, then drop the registration.
    for r in env.cmd('RG.DUMPEXECUTIONS'):
        env.expect('RG.ABORTEXECUTION', r[1]).equal('OK')
        env.expect('RG.DROPEXECUTION', r[1]).equal('OK')

    for r in env.cmd('RG.DUMPREGISTRATIONS'):
        env.expect('RG.UNREGISTER', r[1]).equal('OK')
def testSimpleAsyncWithNoneAsyncResult(env):
    # Exercises steps that create a gearsFuture but do NOT return it: the
    # step result is the plain value (or a raised exception), while the
    # orphaned futures are later resolved/failed by the 'x'/'y' key events.
    conn = getConnectionByEnv(env)
    script = '''
class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

    def continueFailed(self, r):
        if self.bc:
            self.bc.continueFailed(r)

blocked = []

def WaitForKeyChangeReturnSame(r, *args):
    f = gearsFuture()
    blocked.append(BlockHolder(f))
    return r

GB('CommandReader').map(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeMap', mode='async_local')
GB('CommandReader').flatmap(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeFlatmap', mode='async_local')
GB('CommandReader').accumulate(WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeAccumulate', mode='async_local')
GB('CommandReader').groupby(lambda x: 'key', WaitForKeyChangeReturnSame).register(trigger='WaitForKeyChangeAccumulateby', mode='async_local')

def WaitForKeyChangeRaisError(r, *args):
    f = gearsFuture()
    blocked.append(BlockHolder(f))
    raise Exception('test')

GB('CommandReader').foreach(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeForeach', mode='async_local')
GB('CommandReader').filter(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeFilter', mode='async_local')
GB('CommandReader').map(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeMapError', mode='async_local')
GB('CommandReader').flatmap(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeFlatmapError', mode='async_local')
GB('CommandReader').accumulate(WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeAccumulateError', mode='async_local')
GB('CommandReader').groupby(lambda x: 'key', WaitForKeyChangeRaisError).register(trigger='WaitForKeyChangeAccumulatebyError', mode='async_local')

def ForEach(r):
    def unblock(x):
        global blocked
        try:
            [a.continueRun(x['key']) for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEach).register('x', mode='async_local')

def ForEachFailed(r):
    def unblock(x):
        global blocked
        try:
            [a.continueFailed(x['key']) for a in blocked]
            blocked = []
        except Exception as e:
            print(e)
    GB('ShardsIDReader').map(lambda x: r).foreach(unblock).run()

GB().foreach(ForEachFailed).register('y', mode='async_local')
'''
    env.expect('RG.PYEXECUTE', script).ok()

    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    def WaitForKeyMap():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeMap').equal(["['WaitForKeyChangeMap']"])

    def WaitForKeyChangeForeach():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeForeach').error().contains('Exception: test')

    def WaitForKeyChangeFilter():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeFilter').error().contains('Exception: test')

    def WaitForKeyFlatmap():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeFlatmap').equal(
            ['WaitForKeyChangeFlatmap'])

    def WaitForKeyAccumulate():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulate').equal(['None'])

    def WaitForKeyAccumulateby():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulateby').equal(
            ["{'key': 'key', 'value': None}"])

    def WaitForKeyMapError():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeMapError').error().contains('Exception: test')

    def WaitForKeyFlatmapError():
        env.expect(
            'RG.TRIGGER', 'WaitForKeyChangeFlatmapError').error().contains('Exception: test')

    def WaitForKeyAccumulateError():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulateError').equal(['None'])

    def WaitForKeyAccumulatebyError():
        env.expect('RG.TRIGGER', 'WaitForKeyChangeAccumulatebyError').equal(
            ["{'key': 'key', 'value': None}"])

    tests = [
        WaitForKeyMap,
        WaitForKeyChangeForeach,
        WaitForKeyChangeFilter,
        WaitForKeyFlatmap,
        WaitForKeyAccumulate,
        WaitForKeyAccumulateby,
        WaitForKeyMapError,
        WaitForKeyFlatmapError,
        WaitForKeyAccumulateError,
        WaitForKeyAccumulatebyError
    ]

    # First pass: writing 'x' resolves the pending futures via continueRun.
    for f in tests:
        try:
            with Background(f) as task:
                with TimeLimit(50):
                    while task.isAlive:
                        conn.execute_command('set', 'x', '1')
                        time.sleep(1)
        except Exception as e:
            env.assertTrue(
                False, message='Failed waiting for WaitForKeyChange to reach unblock')

    # Second pass: writing 'y' fails the pending futures via continueFailed.
    for f in tests:
        try:
            with Background(f) as task:
                with TimeLimit(50):
                    while task.isAlive:
                        conn.execute_command('set', 'y', '1')
                        time.sleep(1)
        except Exception as e:
            env.assertTrue(
                False, message='Failed waiting for WaitForKeyChange to reach unblock')
def testStreamReaderAsync(env):
    # StreamReader registration that blocks each stream record on a
    # gearsFuture; the 'unblock' trigger pops the oldest pending record and
    # resumes it, whose continuation then writes key/val into Redis.
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    script = '''
fdata = []

class BlockHolder:
    def __init__(self, bc):
        self.bc = bc

    def __getstate__(self):
        state = dict(self.__dict__)
        state['bc'] = None
        return state

    def continueRun(self, r):
        if self.bc:
            self.bc.continueRun(r)

def bc(r):
    global fdata
    f = BlockHolder(gearsFuture())
    fd = (f, r)
    fdata.insert(0, fd)
    return f.bc

GB('CommandReader').map(lambda a: fdata.pop()).foreach(lambda x: x[0].continueRun(x[1])).register(trigger='unblock')
GB('StreamReader').map(bc).foreach(lambda x: execute('set', x['value']['key'], x['value']['val'])).register(mode='async_local', prefix='s')
'''
    env.expect('RG.PYEXECUTE', script).ok()

    # this will make sure registrations reached all the shards
    verifyRegistrationIntegrity(env)

    env.cmd('xadd', 's', '*', 'key', 'x', 'val', '1')
    env.cmd('xadd', 's', '*', 'key', 'y', 'val', '2')
    env.cmd('xadd', 's', '*', 'key', 'z', 'val', '3')

    def Unblock():
        # 'unblock' errors until a record is actually pending; retry until it
        # succeeds once.
        while True:
            try:
                env.cmd('RG.TRIGGER', 'unblock')
                break
            except Exception as e:
                pass

    try:
        with TimeLimit(50):
            for key, expected in (('x', '1'), ('y', '2'), ('z', '3')):
                Unblock()
                val = None
                while val != expected:
                    val = env.cmd('get', key)
                    time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting to reach unblock')
def testStreamReaderRestartOnSlave():
    # Master processes stream batches and increments NumOfElements; on the
    # fifth element (with one replica attached) it deliberately hangs, then
    # the replica is promoted and must resume the registration.
    script = '''
import time

def FailedOnMaster(r):
    numSlaves = int(execute('info', 'replication').split('\\n')[2].split(':')[1])
    currNum = execute('get', 'NumOfElements')
    if currNum is not None:
        currNum = int(currNum)
    if currNum == 5 and numSlaves == 1:
        execute('set', 'inside_loop', '1')
        while True:
            time.sleep(1)
    execute('incr', 'NumOfElements')

GB('StreamReader').foreach(FailedOnMaster).register(regex='stream', batch=3)
'''
    env = Env(env='oss', useSlaves=True)
    if env.envRunner.debugger is not None:
        env.skip()  # valgrind is not working correctly with replication
    slaveConn = env.getSlaveConnection()
    # NOTE(review): masterConn is never read afterwards — presumably kept for
    # the connection side effect; confirm before removing.
    masterConn = env.getConnection()
    env.cmd('rg.pyexecute', script)

    # First batch of three entries -> NumOfElements reaches 3.
    for i in range(3):
        env.execute_command('xadd', 'stream', '*', 'foo', 'bar')
    try:
        with TimeLimit(10):
            num = 0
            while num is None or int(num) != 3:
                num = env.execute_command('get', 'NumOfElements')
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting for NumOfElements to reach 3')

    # Second batch: the master counts up to 5 and then hangs inside the loop.
    for i in range(3):
        env.execute_command('xadd', 'stream', '*', 'foo', 'bar')
    try:
        with TimeLimit(10):
            num = 0
            while num is None or int(num) != 5:
                num = env.execute_command('get', 'NumOfElements')
                time.sleep(0.1)
            inside_loop = False
            while not inside_loop:
                inside_loop = env.execute_command('get', 'inside_loop') == '1'
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Failed waiting for NumOfElements to reach 5')

    slaveConn.execute_command('SLAVEOF', 'NO', 'ONE')

    # slave should become master here and continue the execution
    try:
        with TimeLimit(10):
            num = 0
            while num is None or int(num) != 8:
                num = slaveConn.execute_command('get', 'NumOfElements')
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(
            False, message='Failed waiting for NumOfElements to reach 8 on slave')
def testRegistersReplicatedToSlave():
    # Verifies that a registration replicates to the slave but only executes
    # on the master, and that UNREGISTER propagates to the slave as well.
    env = Env(useSlaves=True, env='oss')
    if env.envRunner.debugger is not None:
        env.skip()  # valgrind is not working correctly with replication
    conn = getConnectionByEnv(env)
    env.cmd(
        'rg.pyexecute',
        "GB().filter(lambda x: x['key'] != 'NumOfKeys')."
        "foreach(lambda x: execute('incrby', 'NumOfKeys', ('1' if 'value' in x.keys() else '-1')))."
        "register()")

    time.sleep(0.5)  # wait for registration to reach all the shards

    slaveConn = env.getSlaveConnection()
    try:
        with TimeLimit(5):
            res = []
            while len(res) < 1:
                res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
    except Exception:
        env.assertTrue(False, message='Failed waiting for Execution to reach slave')

    for i in range(5):
        conn.set(str(i), str(i))

    try:
        with TimeLimit(5):
            numOfKeys = '0'
            while numOfKeys != '5':
                numOfKeys = conn.get('NumOfKeys')
    except Exception:
        env.assertTrue(False, message='Failed waiting for keys to update')

    ## make sure registrations did not run on slave (if it did NumOfKeys would get to 200)
    try:
        with TimeLimit(5):
            numOfKeys = '0'
            while numOfKeys != '5':
                numOfKeys = slaveConn.get('NumOfKeys')
    except Exception:
        env.assertTrue(False, message='Failed waiting for keys to update')

    ## make sure registrations did not run on slave (if it did NumOfKeys would get to 200)
    try:
        with TimeLimit(5):
            done = False
            while not done:
                done = True
                for r in env.cmd('RG.DUMPEXECUTIONS'):
                    try:
                        env.cmd('RG.DROPEXECUTION', r[1])
                    except Exception:
                        done = False
    except Exception:
        env.assertTrue(False, message='Failed dropping all the executions')

    for r in env.cmd('RG.DUMPREGISTRATIONS'):
        env.expect('RG.UNREGISTER', r[1]).equal('OK')

    # The unregister must eventually be reflected on the slave too.
    try:
        with TimeLimit(5):
            res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
            while len(res) > 0:
                res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
    except Exception:
        env.assertTrue(
            False, message='Failed waiting for registration to unregister on slave')
def testUnregisterStreamReaderWithAbortExecutions():
    # With a single execution thread, the third stream event hangs forever;
    # UNREGISTER ... abortpending must abort the stuck and pending executions
    # while still letting an unrelated queued execution run to completion.
    env = Env(moduleArgs='executionThreads 1')
    env.skipOnCluster()
    infinitScript = '''
counter = 0

def InfinitLoop(r):
    import time
    global counter
    counter+=1
    while counter > 3: # enter an infinit loop on the third time
        time.sleep(0.1)
    return r

GB('StreamReader').map(InfinitLoop).register('s', mode='async_local', onFailedPolicy='abort')
'''
    env.expect('rg.pyexecute', infinitScript).ok()

    env.cmd('xadd', 's', '*', 'foo', 'bar')

    # we have this part to make sure no two events will enter the same execution
    # because the first write triggers the background event that reads all the data
    # from the stream.
    try:
        with TimeLimit(4):
            done = False
            while not done:
                registrationInfo = env.cmd('RG.DUMPREGISTRATIONS')
                if registrationInfo[0][7][3] == 3 and registrationInfo[0][7][5] == 3:
                    done = True
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Could not wait for all executions to finished')

    env.cmd('xadd', 's', '*', 'foo', 'bar')
    env.cmd('xadd', 's', '*', 'foo', 'bar')
    env.cmd('xadd', 's', '*', 'foo', 'bar')  # infinit loop
    env.cmd('xadd', 's', '*', 'foo', 'bar')  # pending execution

    registrationInfo = env.cmd('RG.DUMPREGISTRATIONS')
    registrationId = registrationInfo[0][1]

    try:
        with TimeLimit(4):
            done = False
            while not done:
                registrationInfo = env.cmd('RG.DUMPREGISTRATIONS')
                if registrationInfo[0][7][3] == 7 and registrationInfo[0][7][5] == 5:
                    done = True
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Could not wait for all executions')

    # create another execution, make sure its pending
    eid = env.cmd('rg.pyexecute', 'GB("KeysOnlyReader").run()', 'UNBLOCKING')
    executionsInfo = env.cmd('RG.DUMPEXECUTIONS')
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'done']), 5)
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'running']), 1)
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'created']), 2)

    env.expect('RG.UNREGISTER', registrationId, 'abortpending').ok()

    # Only the unrelated UNBLOCKING execution should survive the unregister.
    try:
        with TimeLimit(2):
            while True:
                remaining = len(env.cmd('RG.DUMPEXECUTIONS'))
                if remaining == 1:
                    break
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Could not wait for all executions to finished')

    try:
        with TimeLimit(2):
            status = None
            while status != 'done':
                time.sleep(0.1)
                status = env.cmd('RG.GETEXECUTION', eid)[0][3][1]
    except Exception as e:
        env.assertTrue(False, message='Could not wait for execution to finish')

    env.cmd('RG.DROPEXECUTION', eid)
def testUnregisterKeysReaderWithAbortExecutions():
    # Same scenario as the stream-reader variant but driven by key writes:
    # the fourth write hangs forever, and UNREGISTER ... abortpending must
    # clear the stuck/pending executions while the queued UNBLOCKING
    # execution still completes.
    env = Env(moduleArgs='executionThreads 1')
    env.skipOnCluster()
    infinitScript = '''
counter = 0

def InfinitLoop(r):
    import time
    global counter
    counter+=1
    while counter > 3: # enter an infinit loop on the third time
        time.sleep(0.1)
    return r

GB().map(InfinitLoop).register('*', mode='async_local')
'''
    env.expect('rg.pyexecute', infinitScript).ok()

    env.cmd('set', 'x', '1')
    env.cmd('set', 'y', '1')
    env.cmd('set', 'z', '1')
    env.cmd('set', 'l', '1')  # infinit loop
    env.cmd('set', 'm', '1')  # pending execution

    registrationInfo = env.cmd('RG.DUMPREGISTRATIONS')
    registrationId = registrationInfo[0][1]

    try:
        with TimeLimit(2):
            done = False
            while not done:
                registrationInfo = env.cmd('RG.DUMPREGISTRATIONS')
                if registrationInfo[0][7][3] == 5 and registrationInfo[0][7][5] == 3:
                    done = True
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False, message='Could not wait for all executions')

    # create another execution, make sure its pending
    eid = env.cmd('rg.pyexecute', 'GB("KeysOnlyReader").run()', 'UNBLOCKING')
    executionsInfo = env.cmd('RG.DUMPEXECUTIONS')
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'done']), 3)
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'running']), 1)
    env.assertEqual(len([a[3] for a in executionsInfo if a[3] == 'created']), 2)

    env.expect('RG.UNREGISTER', registrationId, 'abortpending').ok()
    env.assertEqual(len(env.cmd('RG.DUMPEXECUTIONS')), 1)

    try:
        with TimeLimit(2):
            status = None
            while status != 'done':
                time.sleep(0.1)
                status = env.cmd('RG.GETEXECUTION', eid)[0][3][1]
    except Exception as e:
        env.assertTrue(False, message='Could not wait for execution to finish')

    env.cmd('RG.DROPEXECUTION', eid)