def test(s): req = Request(someWork) req.notify(callback) req.wait() time.sleep(0.001) print s return s
def test_withH5Py(self): """ We have suspicions that greenlet and h5py don't interact well with eachother. This tests basic functionality. TODO: Expand it for better coverage. """ maxDepth = 5 maxBreadth = 10 filename = 'requestTest.h5' h5File = h5py.File( filename, 'w' ) dataset = h5File.create_dataset( 'test/data', data=numpy.zeros( (maxDepth, maxBreadth), dtype=int )) def writeToH5Py(result, index, req): dataset[index] += 1 # This closure randomly chooses to either (a) return immediately or (b) fire off more work def someWork(depth, force=False, i=0): #print 'depth=', depth, 'i=', i if depth > 0 and (force or random.random() > 0.5): requests = [] for i in range(maxBreadth): req = Request(someWork, depth=depth-1, i=i) req.notify(writeToH5Py, index=(depth-1, i), req=req) requests.append(req) for r in requests: r.wait() req = Request(someWork, depth=maxDepth, force=True) req.wait() h5File.close() print "finished testWithH5Py" os.remove(filename)
def test_pause_unpause(self):
    """
    Exercise global_thread_pool.pause()/unpause(): requests created while the
    pool is paused must not run until unpause, and every fired request's
    completion handler must be called exactly once.
    """
    # Shared counter of handler invocations, guarded by its own lock.
    # (A one-element list is used so the closure can mutate it in Python 2.)
    handlerCounter = [0]
    handlerLock = threading.Lock()

    def completionHandler( result, req ):
        handlerLock.acquire()
        handlerCounter[0] += 1
        handlerLock.release()

    # Shared counter of requests fired, plus a record of every sub-request.
    requestCounter = [0]
    requestLock = threading.Lock()
    allRequests = []

    # This closure randomly chooses to either (a) return immediately or (b) fire off more work
    def someWork(depth, force=False, i=-1):
        #print 'depth=', depth, 'i=', i
        if depth > 0 and (force or random.random() > 0.8):
            requests = []
            for i in range(10):
                req = Request(someWork, depth=depth-1, i=i)
                req.notify(completionHandler, req=req)
                requests.append(req)
                allRequests.append(req)
                requestLock.acquire()
                requestCounter[0] += 1
                requestLock.release()
            # Block until the whole sub-tree of work is finished.
            for r in requests:
                r.wait()

    req = Request(someWork, depth=6, force=True)

    def blubb(req):
        pass

    req.notify(blubb)
    global_thread_pool.pause()
    # req2 is created while the pool is paused, so it must not have run yet.
    req2 = Request(someWork, depth=6, force=True)
    req2.notify(blubb)
    global_thread_pool.unpause()
    # NOTE(review): assumes req2 has not been scheduled by the time unpause
    # returns, and that req (fired before the pause) already completed —
    # confirm these ordering guarantees against the thread-pool implementation.
    assert req2.finished == False
    assert req.finished
    req.wait()

    # Handler should have been called once for each request we fired
    assert handlerCounter[0] == requestCounter[0]
    print "finished pause_unpause"
    for r in allRequests:
        assert r.finished
    print "waited for all subrequests"
def testBasic(self): """ Test the SimpleRequestCondition, which is like threading.Condition, but with a subset of the functionality. (See the docs for details.) """ # num_workers = Request.global_thread_pool.num_workers # Request.reset_thread_pool(num_workers=1) N_ELEMENTS = 100 # It's tempting to simply use threading.Condition here, # but that doesn't quite work if the thread calling wait() is also a worker thread. # (threading.Condition uses threading.Lock() as it's 'waiter' lock, which blocks the entire worker.) # cond = threading.Condition( RequestLock() ) cond = SimpleRequestCondition() produced = [] consumed = [] def wait_for_all(): def f(i): time.sleep(0.2 * random.random()) with cond: produced.append(i) cond.notify() reqs = [] for i in range(N_ELEMENTS): req = Request(partial(f, i)) reqs.append(req) for req in reqs: req.submit() _consumed = consumed with cond: while len(_consumed) < N_ELEMENTS: while len(_consumed) == len(produced): cond.wait() logger.debug("copying {} elements".format( len(produced) - len(consumed))) _consumed += produced[len(_consumed):] # Force the request to run in a worker thread. # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock. req = Request(wait_for_all) req.submit() # Now block for completion req.wait() logger.debug("produced: {}".format(produced)) logger.debug("consumed: {}".format(consumed)) assert set(consumed) == set( range(N_ELEMENTS) ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
def testBasic(self):
    """
    Producer/consumer exercise for SimpleRequestCondition, a stripped-down
    stand-in for threading.Condition that is safe to wait on from within a
    request worker thread.
    """
    N_ELEMENTS = 100

    # A plain threading.Condition would use threading.Lock() as its 'waiter'
    # lock and block the entire worker thread while waiting, so the
    # request-aware condition is used instead.
    cond = SimpleRequestCondition()

    produced = []
    consumed = []

    def wait_for_all():
        # Producer: sleep a random amount, then publish the index and signal.
        def f(i):
            time.sleep(0.2 * random.random())
            with cond:
                produced.append(i)
                cond.notify()

        pending = [Request(partial(f, i)) for i in range(N_ELEMENTS)]
        for r in pending:
            r.submit()

        # Consumer: drain `produced` as elements arrive.
        # `sink` aliases `consumed`, so the outer list is filled in place.
        sink = consumed
        with cond:
            while len(sink) < N_ELEMENTS:
                while len(sink) == len(produced):
                    cond.wait()
                logger.debug("copying {} elements".format(len(produced) - len(consumed)))
                sink += produced[len(sink):]

    # Run the whole consumer inside a worker thread.  This should catch
    # failures that occur when the condition's "waiter" lock isn't a request lock.
    req = Request(wait_for_all)
    req.submit()

    # Block until the cascade completes.
    req.wait()

    logger.debug("produced: {}".format(produced))
    logger.debug("consumed: {}".format(consumed))
    assert set(consumed) == set(range(N_ELEMENTS)), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
def test_callWaitDuringCallback(self):
    """
    A completion handler registered via request.notify(...) must be allowed
    to call request.wait() on the request it was attached to.
    (This used to hang somewhere in request.py.)
    """
    def noop():
        pass

    def onComplete(result, req):
        # Waiting on an already-finished request must return immediately.
        return req.wait()

    r = Request(noop)
    r.notify(onComplete, req=r)
    r.wait()
def test_callWaitDuringCallback(self):
    """
    Regression check: calling wait() on a request from inside its own
    notify() completion handler must not deadlock.
    """
    def handler(result, req):
        return req.wait()

    request = Request(lambda: None)
    request.notify(handler, req=request)
    request.wait()
def test_basic(self): def someWork(): time.sleep(0.001) #print "producer finished" def callback(s): pass def test(s): req = Request(someWork) req.notify(callback) req.wait() time.sleep(0.001) print s return s req = Request(test, s="hallo !") req.notify(callback) assert req.wait() == "hallo !" requests = [] for i in range(10): req = Request(test, s="hallo %d" % i) requests.append(req) for r in requests: r.wait()
def test_basic(self): def someWork(): time.sleep(0.001) #print "producer finished" def callback(s): pass def test(s): req = Request(someWork) req.notify(callback) req.wait() time.sleep(0.001) print s return s req = Request( test, s = "hallo !") req.notify(callback) assert req.wait() == "hallo !" requests = [] for i in range(10): req = Request( test, s = "hallo %d" %i) requests.append(req) for r in requests: r.wait()
def execute(self, slot, subindex, roi, result):
    """
    Simulate a cascade of requests, to make sure that the entire cascade
    is properly freed.
    """
    shape = roi.stop - roi.start

    def innerWork():
        return numpy.indices(shape, self.Output.meta.dtype).sum()

    def outerWork():
        # One extra level of request nesting.
        nested = Request(innerWork)
        nested.submit()
        result[:] = nested.wait()
        return result

    top = Request(outerWork)
    top.submit()
    result[:] = top.wait()
    return result
def execute(self, slot, subindex, roi, result):
    """
    Build a two-level request cascade so that teardown of the whole chain
    can be checked for leaks.
    """
    roiShape = roi.stop - roi.start

    def level1():
        return numpy.indices(roiShape, self.Output.meta.dtype).sum()

    def level2():
        # Run level1 in its own request and copy its output into `result`.
        r = Request(level1)
        r.submit()
        result[:] = r.wait()
        return result

    outer = Request(level2)
    outer.submit()
    result[:] = outer.wait()
    return result
def getResults2():
    # Chain one more request level: run getResults1 and copy its
    # output into the enclosing `result` buffer.
    inner = Request(getResults1)
    inner.submit()
    result[:] = inner.wait()
    return result
t1 = time.time() def lots_of_work(): requests = [] for i in range(mcount): req = Request(functools.partial(empty_func, b = 11)) req.submit() for r in requests: r.wait() # Make sure this test occurs entirely within greenlets. req = Request( functools.partial( lots_of_work ) ) req.submit() req.wait() t2 = time.time() print "\n\n" print "LAZYFLOW REQUEST WAIT: %f seconds for %d iterations" % (t2-t1,mcount) print " %0.3fms latency" % ((t2-t1)*1e3/mcount,) t1 = time.time() pool = Pool() for i in range(50000): pool.request(functools.partial(empty_func, b = 11)) pool.wait()
def getResults2():
    # Second level of the cascade: wait on getResults1 via a fresh
    # request and store the answer into the enclosing `result`.
    sub = Request(getResults1)
    sub.submit()
    result[:] = sub.wait()
    return result
def time_fn(num_tasks):
    # Issue the requests serially, blocking on each one before starting the next.
    for _ in xrange(num_tasks):
        Request(fn).wait()
def lots_of_work(): requests = [] for i in range(mcount): req = Request(functools.partial(empty_func, b=11)) req.submit() for r in requests: r.wait() # Make sure this test occurs entirely within greenlets. req = Request(functools.partial(lots_of_work)) req.submit() req.wait() t2 = time.time() print "\n\n" print "LAZYFLOW REQUEST WAIT: %f seconds for %d iterations" % (t2 - t1, mcount) print " %0.3fms latency" % ( (t2 - t1) * 1e3 / mcount, ) t1 = time.time() pool = Pool() for i in range(50000): pool.request(functools.partial(empty_func, b=11))