def testSimpleRequestCondition(self):
    """
    Test the SimpleRequestCondition, which is like threading.Condition,
    but with a subset of the functionality. (See the docs for details.)

    A single producer request appends to ``produced`` and notifies the
    condition; the consumer (itself running as a Request in a worker
    thread) waits on the condition and drains new items into ``consumed``.
    """
    # FIX: remember the current pool size and restore it in a finally
    # block below.  The original version left the global pool at 1 worker
    # for every test that ran afterwards.
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    try:
        N_ELEMENTS = 10

        # It's tempting to simply use threading.Condition here,
        # but that doesn't quite work if the thread calling wait() is also a worker thread.
        # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
        # cond = threading.Condition( RequestLock() )
        cond = SimpleRequestCondition()

        produced = []
        consumed = []

        def wait_for_all():
            def f(i):
                # Randomized delay so producers finish in scrambled order.
                time.sleep(0.2 * random.random())
                with cond:
                    produced.append(i)
                    cond.notify()

            reqs = []
            for i in range(N_ELEMENTS):
                req = Request(partial(f, i))
                reqs.append(req)

            for req in reqs:
                req.submit()

            _consumed = consumed
            with cond:
                while len(_consumed) < N_ELEMENTS:
                    # Sleep on the condition until a producer adds something new.
                    while len(_consumed) == len(produced):
                        cond.wait()
                    logger.debug("copying {} elements".format(len(produced) - len(consumed)))
                    _consumed += produced[len(_consumed):]

        # Force the request to run in a worker thread.
        # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
        req = Request(wait_for_all)
        req.submit()

        # Now block for completion
        req.wait()

        logger.debug("produced: {}".format(produced))
        logger.debug("consumed: {}".format(consumed))
        assert set(consumed) == set(
            range(N_ELEMENTS)
        ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
    finally:
        # Restore the original pool size, even if the assertions above failed.
        Request.reset_thread_pool(num_workers)
def testSimpleRequestCondition(self):
    """
    Test the SimpleRequestCondition, which is like threading.Condition,
    but with a subset of the functionality. (See the docs for details.)

    A single producer request appends to ``produced`` and notifies the
    condition; the consumer (itself running as a Request in a worker
    thread) waits on the condition and drains new items into ``consumed``.
    """
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    # FIX: the pool restore at the end must run even if an assertion or
    # exception fires, otherwise the global pool is left with one worker
    # for all subsequent tests.  The original restored it unconditionally
    # at the end, outside any try/finally.
    try:
        N_ELEMENTS = 10

        # It's tempting to simply use threading.Condition here,
        # but that doesn't quite work if the thread calling wait() is also a worker thread.
        # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
        # cond = threading.Condition( RequestLock() )
        cond = SimpleRequestCondition()

        produced = []
        consumed = []

        def wait_for_all():
            def f(i):
                # Randomized delay so producers finish in scrambled order.
                time.sleep(0.2 * random.random())
                with cond:
                    produced.append(i)
                    cond.notify()

            reqs = []
            for i in range(N_ELEMENTS):
                req = Request(partial(f, i))
                reqs.append(req)

            for req in reqs:
                req.submit()

            _consumed = consumed
            with cond:
                while len(_consumed) < N_ELEMENTS:
                    # Sleep on the condition until a producer adds something new.
                    while len(_consumed) == len(produced):
                        cond.wait()
                    logger.debug("copying {} elements".format(len(produced) - len(consumed)))
                    _consumed += produced[len(_consumed):]

        # Force the request to run in a worker thread.
        # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
        req = Request(wait_for_all)
        req.submit()

        # Now block for completion
        req.wait()

        logger.debug("produced: {}".format(produced))
        logger.debug("consumed: {}".format(consumed))
        assert set(consumed) == set(
            range(N_ELEMENTS)
        ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
    finally:
        # Restore the original pool size, even on failure.
        Request.reset_thread_pool(num_workers)
def testThreadPoolReset(self):
    """
    Shrink the global thread pool to a single worker and verify that
    requests then execute serially (no two may hold the lock at once).
    """
    # FIX: remember the current pool size and restore it in a finally
    # block, matching the sibling variants of this test.
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    try:
        lock = threading.Lock()

        def check_for_contention():
            # Non-blocking acquire: with one worker, this must always succeed.
            assert lock.acquire(False), "Should not be contention for this lock!"
            time.sleep(0.1)
            lock.release()

        # FIX: must be a list, not a map() object.  Under Python 3 the
        # original map() iterator was exhausted by the submit loop, so the
        # wait loop below iterated over nothing and the test returned
        # without waiting for any request to finish.
        reqs = [Request(check_for_contention) for _ in range(10)]
        for req in reqs:
            req.submit()
        for req in reqs:
            req.wait()
    finally:
        # Set it back to what it was
        Request.reset_thread_pool(num_workers)
def testThreadPoolReset(self):
    """
    Verify Request.reset_thread_pool(): with the pool shrunk to a single
    worker, requests must run one at a time, so a non-blocking lock
    acquire inside each request can never fail.  The original pool size
    is restored afterwards regardless of outcome.
    """
    original_size = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    try:
        guard = threading.Lock()

        def check_for_contention():
            # If two workers ran this concurrently, the second
            # non-blocking acquire would fail and trip the assert.
            assert guard.acquire(False), "Should not be contention for this lock!"
            time.sleep(0.1)
            guard.release()

        pending = [Request(check_for_contention) for _ in range(10)]
        for request in pending:
            request.submit()
        for request in pending:
            request.wait()
    finally:
        # Set it back to what it was
        Request.reset_thread_pool(original_size)
def testThreadPoolReset(self):
    """
    Shrink the global thread pool to a single worker and verify that
    requests then execute serially (no two may hold the lock at once).
    The original pool size is restored afterwards regardless of outcome.
    """
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    try:
        lock = threading.Lock()

        def check_for_contention():
            # Non-blocking acquire: with one worker, this must always succeed.
            assert lock.acquire(False), "Should not be contention for this lock!"
            time.sleep(0.1)
            lock.release()

        reqs = [Request(check_for_contention) for x in range(10)]
        for req in reqs:
            req.submit()
        for req in reqs:
            req.wait()
    finally:
        # Set it back to what it was
        Request.reset_thread_pool(num_workers)
    # FIX: removed the stray print("done") debug statement; this module
    # otherwise reports progress through its logger, not stdout.
import threading import sys import logging handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(levelname)s %(name)s %(message)s') handler.setFormatter(formatter) # Test logger = logging.getLogger("tests.testRequestRewrite") # Test Trace traceLogger = logging.getLogger("TRACE." + logger.name) TEST_WITH_SINGLE_THREADED_DEBUG_MODE = False if TEST_WITH_SINGLE_THREADED_DEBUG_MODE: Request.reset_thread_pool(0) class TestRequest(unittest.TestCase): @traceLogged(traceLogger) def test_basic(self): """ Fire a couple requests and check the answer they give. """ def someWork(): time.sleep(0.001) return "Hello," callback_result = [''] def callback(result):
import threading import sys import logging handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(levelname)s %(name)s %(message)s') handler.setFormatter(formatter) # Test logger = logging.getLogger("tests.testRequestRewrite") # Test Trace traceLogger = logging.getLogger("TRACE." + logger.name) TEST_WITH_SINGLE_THREADED_DEBUG_MODE = False if TEST_WITH_SINGLE_THREADED_DEBUG_MODE: Request.reset_thread_pool(0) class TestRequest(unittest.TestCase): @traceLogged(traceLogger) def test_basic(self): """ Fire a couple requests and check the answer they give. """ def someWork(): time.sleep(0.001) return "Hello," callback_result = [''] def callback(result): callback_result[0] = result