def query_services(self, service_type, url, timeout=10, call_int=False, delay=0, *args, **kwargs):
    """Fan out an HTTP-style query to every matching service in the cluster.

    Walks the cluster "daemons" object and, for every service whose "type"
    equals service_type, launches a tasklet running self.do_query_service_exc
    with (svcid, svcinfo, url, timeout, *args, **kwargs), then joins them all.

    Parameters:
        service_type -- value matched against each service's "type" field.
        url          -- passed through to do_query_service_exc (presumably the
                        request path; verify against that method).
        timeout      -- passed through to do_query_service_exc.
        call_int     -- if True, the query for a matching service is redirected
                        to that daemon's "int" service instead (see below).
        delay        -- seconds to sleep between launching consecutive
                        tasklets; 0 disables throttling.
    """
    inst = self.app().inst
    # NOTE(review): daemon registry loaded via the internal app; silent=True
    # presumably suppresses "not found" errors — confirm against obj()'s API.
    daemons = inst.int_app.obj(DBCluster, "daemons", silent=True)
    tasklets = []
    for dmnid, dmninfo in daemons.data.items():
        services = dmninfo.get("services", {})
        for svcid, svcinfo in services.items():
            # only services of the requested type are queried
            if svcinfo.get("type") != service_type:
                continue
            if call_int:
                # redirect this query to the daemon's "int" service:
                # svcid/svcinfo are deliberately rebound to the int service's
                # id/info, so the tasklet below targets it instead
                found = False
                for sid, sinfo in services.items():
                    if sinfo.get("type") != "int":
                        continue
                    found = True
                    svcid = sid
                    svcinfo = sinfo
                    break
                if not found:
                    # daemon has a matching service but no "int" service: skip
                    continue
            # services without a reachable address/port cannot be queried
            if "addr" not in svcinfo:
                continue
            if "port" not in svcinfo:
                continue
            task = Tasklet.new(self.do_query_service_exc)
            tasklets.append(task)
            task(svcid, svcinfo, url, timeout, *args, **kwargs)
            # optional throttle between launches, not after the join
            if delay > 0:
                Tasklet.sleep(delay)
    # wait for every launched query to complete before returning
    Tasklet.join_all(tasklets)
def testMultiTask(self):
    """TaskLocal keeps an independent value per task and drops it when the task dies."""
    local = TaskLocal()

    def worker():
        # each task builds its own private list on the shared TaskLocal
        local.piet = []
        for step in range(10):
            local.piet.append(step)
            Tasklet.yield_()
        self.assertEquals(range(10), local.piet)

    task_a = Tasklet.new(worker)()
    task_b = Tasklet.new(worker)()
    Tasklet.join_all([task_a, task_b])
    # both finished tasks are still referenced, so their slots remain
    self.assertEquals(2, len(local._d.keys()))
    # drop the last references; the per-task values should disappear
    del task_a
    del task_b
    # the joined tasks were left suspended; yielding runs the scheduler so
    # they can properly finish (python needs 2 yields, stackless just 1)
    Tasklet.yield_()
    Tasklet.yield_()
    self.assertEquals([], local._d.keys())
def testMultiTask(self):
    """Values stored in a TaskLocal must vanish once their owning task is gone."""
    local = TaskLocal()

    def child():
        local.piet = []
        for value in range(10):
            local.piet.append(value)
            Tasklet.yield_()
        # every task must see only the list it built itself
        self.assertEquals(range(10), local.piet)

    first = Tasklet.new(child)()
    second = Tasklet.new(child)()
    Tasklet.join_all([first, second])
    # the 2 tasks are still around, so local keeps their values
    self.assertEquals(2, len(local._d.keys()))
    # releasing the task references must purge their entries from the dict
    del first
    del second
    # the join left both tasks suspended; yield twice so the scheduler can
    # let them properly finish (stackless would need just one yield)
    Tasklet.yield_()
    Tasklet.yield_()
    self.assertEquals([], local._d.keys())
def testerrors(self):
    """Hammer a pooled memcached client with failing and self-handling tasks at once."""
    client = Memcached(pool=MemcachedPool(size=4))
    # 100 tasks that blow up, then 100 that handle their own errors
    pending = [Tasklet.new(self.error_thread)(client) for _ in xrange(0, 100)]
    pending.extend(Tasklet.new(self.handled_thread)(client) for _ in xrange(0, 100))
    Tasklet.join_all(pending)
def run(self):
    """Drive the benchmark: periodic stats, token dispenser, then all sessions."""
    # report stats every second, starting immediately
    Tasklet.interval(1.0, self.show, immediate = True)()
    # background task that hands out request tokens to the sessions
    Tasklet.new(self.dispense)()
    # spawn every session task, then block until the last one finishes
    session_tasks = []
    for _ in range(self.options.sessions):
        session_tasks.append(Tasklet.new(self.sessions)())
    Tasklet.join_all(session_tasks)
    quit()
def tasks_monitor(self):
    """Render the admin task-monitor page, fetching per-daemon status in parallel."""
    req = self.req()
    int_app = self.app().inst.int_app
    # daemons sorted by id for a stable page layout
    daemon_list = int_app.call("cluster.daemons").items()
    daemon_list.sort(cmp=lambda left, right: cmp(left[0], right[0]))
    rows = []
    fetchers = []
    for daemon_id, info in daemon_list:
        row = {"id": daemon_id}
        # each status fetch runs in its own tasklet and fills its row in place
        fetchers.append(Tasklet.new(self.fetch_status)(daemon_id, row))
        rows.append(row)
    Tasklet.join_all(fetchers)
    params = {"daemons": rows}
    self.call("admin.response_template", "admin/tasks/monitor.html", params)
def testYield(self):
    """Two children that yield after every step must interleave their output strictly."""
    trace = []

    def child(tag):
        for step in range(5):
            trace.append((tag, step))
            Tasklet.yield_()  # hand control to the sibling after every append

    first = Tasklet.new(child)(1)
    second = Tasklet.new(child)(2)
    Tasklet.join_all([first, second])
    # perfect round-robin: (1,0),(2,0),(1,1),(2,1),...
    expected = [(tag, step) for step in range(5) for tag in (1, 2)]
    self.assertEquals(expected, trace)
def testJoinAll(self):
    """join_all must return per-task results in spawn order, wrapping exceptions.

    Two of the four child tasks raise; their slots in the result list must
    hold JoinError objects whose .cause is the original exception, while the
    other slots hold the plain return values.
    """
    def sub0():
        raise Exception("a proper exc")
    def sub1():
        return 1
    def sub2():
        return 2
    def sub3():
        raise Exception("test exc")
    subs = [Tasklet.new(sub)() for sub in [sub0, sub1, sub2, sub3]]
    results = Tasklet.join_all(subs)
    self.assertTrue(isinstance(results[0], JoinError))
    self.assertTrue(isinstance(results[0].cause, Exception))
    # bugfix: a stray third positional argument (the Exception class) was
    # being passed as assertEquals' "msg" parameter — meaningless; removed.
    self.assertEquals("a proper exc", str(results[0].cause))
    self.assertEquals(1, results[1])
    self.assertEquals(2, results[2])
    self.assertTrue(isinstance(results[3], JoinError))
    self.assertTrue(isinstance(results[3].cause, Exception))
    self.assertEquals("test exc", str(results[3].cause))
def testDeadlocks(self):
    """Two transactions updating the same row must make innodb kill one with a deadlock."""
    def worker(cnn, cur, val):
        try:
            cur.execute("begin")
            cur.execute("insert into tbltest (test_id) values (1)")
            cur.execute("select sleep(2)")
            cur.execute("update tbltest set test_id=%d" % val)
            cur.execute("select sleep(2)")
            cur.execute("commit")
        except dbapi.Error as e:
            # the deadlock victim gets an error mentioning "deadlock"
            return "deadlock" in str(e).lower()
        return False

    first_cnn = dbapi.connect(host = DB_HOST, user = DB_USER, passwd = DB_PASSWD, db = DB_DB)
    first_cur = first_cnn.cursor()
    second_cnn = dbapi.connect(host = DB_HOST, user = DB_USER, passwd = DB_PASSWD, db = DB_DB)
    second_cur = second_cnn.cursor()
    outcome = Tasklet.join_all([
        Tasklet.new(worker)(first_cnn, first_cur, 2),
        Tasklet.new(worker)(second_cnn, second_cur, 3),
    ])
    self.assertTrue(outcome[0] or outcome[1], 'At least one of the queries expected to fail due to deadlock (innodb must be used)')
    # Both connections must survive after error
    first_cur.execute("select 1")
    second_cur.execute("select 2")
    first_cur.close()
    first_cnn.close()
    second_cur.close()
    second_cnn.close()
def tasks_monitor(self):
    """Admin page: collect status from every cluster daemon concurrently."""
    req = self.req()
    int_app = self.app().inst.int_app
    # stable ordering by daemon id
    daemons = sorted(int_app.call("cluster.daemons").items(), cmp=lambda a, b: cmp(a[0], b[0]))
    rdaemons = []
    tasklets = []
    for dmnid, daemon in daemons:
        entry = {"id": dmnid}
        rdaemons.append(entry)
        # fetch_status mutates its entry dict; run all fetches in parallel
        tasklets.append(Tasklet.new(self.fetch_status)(dmnid, entry))
    Tasklet.join_all(tasklets)
    self.call("admin.response_template", "admin/tasks/monitor.html", {"daemons": rdaemons})
def testParallelQuery(self):
    """Three sleep queries run concurrently should take ~3s (the longest), not 6s."""
    def query(seconds):
        cnn = dbapi.connect(host = DB_HOST, user = DB_USER, passwd = DB_PASSWD, db = DB_DB)
        cur = cnn.cursor()
        cur.execute("select sleep(%d)" % seconds)
        cur.close()
        cnn.close()

    start = time.time()
    children = [Tasklet.new(query)(n) for n in (1, 2, 3)]
    Tasklet.join_all(children)
    elapsed = time.time() - start
    # wall time is dominated by the slowest query when they truly overlap
    self.assertAlmostEqual(3.0, elapsed, places = 1)
def testTaskInstance2(self):
    """Child tasks inherit a TaskInstance from their parent and may shadow it locally."""
    AdderInstance = TaskInstance(True)
    with AdderInstance.set(Adder(10)):
        self.assertEquals(30, AdderInstance.sum(20))

        # each child first sees the parent's Adder(10), then shadows it
        def child():
            self.assertEquals(30, AdderInstance.sum(20))
            with AdderInstance.set(Adder(20)):
                self.assertEquals(40, AdderInstance.sum(20))
            # the shadow is gone; the parent's instance is visible again
            self.assertEquals(30, AdderInstance.sum(20))

        first = Tasklet.new(child)()
        second = Tasklet.new(child)()
        Tasklet.join_all([first, second])
        # the parent's own instance was never disturbed by the children
        self.assertEquals(30, AdderInstance.sum(20))
def testTaskInstance2(self):
    """A task-scoped instance set by a parent is visible to children until they override it."""
    AdderInstance = TaskInstance(True)
    with AdderInstance.set(Adder(10)):
        self.assertEquals(30, AdderInstance.sum(20))

        def worker():
            # before setting anything, the parent's Adder(10) is found
            self.assertEquals(30, AdderInstance.sum(20))
            with AdderInstance.set(Adder(20)):
                # our own instance wins while the context is active
                self.assertEquals(40, AdderInstance.sum(20))
            self.assertEquals(30, AdderInstance.sum(20))

        children = [Tasklet.new(worker)(), Tasklet.new(worker)()]
        Tasklet.join_all(children)
        self.assertEquals(30, AdderInstance.sum(20))
def testDeadlocks(self):
    """Force a deadlock between two concurrent transactions; one must fail, both connections survive."""
    def run_txn(cnn, cur, val):
        try:
            cur.execute("begin")
            cur.execute("insert into tbltest (test_id) values (1)")
            cur.execute("select sleep(2)")
            cur.execute("update tbltest set test_id=%d" % val)
            cur.execute("select sleep(2)")
            cur.execute("commit")
            return False
        except dbapi.Error as e:
            # innodb reports the victim with a "deadlock" message
            return "deadlock" in str(e).lower()

    connections = []
    cursors = []
    for _ in (0, 1):
        cnn = dbapi.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWD, db=DB_DB)
        connections.append(cnn)
        cursors.append(cnn.cursor())
    tasks = [Tasklet.new(run_txn)(connections[0], cursors[0], 2),
             Tasklet.new(run_txn)(connections[1], cursors[1], 3)]
    res = Tasklet.join_all(tasks)
    self.assertTrue(
        res[0] or res[1],
        'At least one of the queries expected to fail due to deadlock (innodb must be used)'
    )
    # Both connections must survive after error
    cursors[0].execute("select 1")
    cursors[1].execute("select 2")
    for cur, cnn in zip(cursors, connections):
        cur.close()
        cnn.close()
def testthreading(self):
    """Spawn 100 worker tasks and wait for all of them to finish."""
    workers = [Tasklet.new(self.thread)(n) for n in xrange(0, 100)]
    Tasklet.join_all(workers)