def _get_pools(resource_name: str) -> (ThreadPool, RegisteredResourcePool):
    """
    Return the (ThreadPool, RegisteredResourcePool) pair serving the given
    resource, creating and caching both on first use.

    Both pools are sized from the same ResourceFactory so that every worker
    thread can always obtain a resource instance of its own.
    """
    with _pools_lock:
        pools = _combined_pools.get(resource_name)
        if pools is None:
            # first request for this resource - build the matching pair of pools
            resource_factory = ResourceFactory(resource_name)
            worker_threads = ThreadPool(resource_name, resource_factory.pool_size)
            # the hard limit on the resource pool size is deliberately greater
            # than the number of worker threads: each worker keeps a guaranteed
            # resource slot, while the sweeper threads still get busy slots of
            # their own when they remove expired connections or warm the pool up
            resources = RegisteredResourcePool(resource_name, resource_factory,
                                               resource_factory.pool_size + 2,
                                               resource_factory.pool_standby,
                                               resource_factory.pool_cache)
            pools = (worker_threads, resources)
            _combined_pools[resource_name] = pools
        return pools
class ThreadPool:
    """
    A fixed-size pool of worker threads fed from a priority queue.

    Work units are pushed through enqueue(); when no thread is free they wait
    in an InterlockedPriorityQueue and are picked up as threads release
    themselves back to the pool.
    """

    @typecheck
    def __init__(self, name: str, size: int):
        # each PooledThread is handed the _release callback through which it
        # returns itself to the pool after finishing a work unit
        self._threads = RegisteredResourcePool(name, lambda name: PooledThread(name, self._release), size)
        self._queue = InterlockedPriorityQueue()

    name = property(lambda self: self._threads.name)
    size = property(lambda self: self._threads.size)
    free = property(lambda self: self._threads.free)    # threads currently idle
    busy = property(lambda self: self._threads.busy)    # threads currently working
    over = property(lambda self: len(self._queue))      # work units waiting in the queue

    def _push(self, work_unit = None):
        # dispatch one queued work unit to a pool thread if possible;
        # called with a work unit from enqueue(), or bare from _release()
        # to drain the queue
        if work_unit is not None:  # explicit None check - a falsy-but-valid
            self._queue.push(work_unit)  # work unit must still be queued
        work_unit = self._queue.pop(0.0)  # non-blocking pop, None if empty
        if work_unit is None:
            return
        try:
            thread = self._threads.allocate()
        except (ResourcePoolEmpty, ResourcePoolStopped):
            # no thread available right now - put the unit back, it will be
            # retried when some thread releases itself via _release()
            self._queue.push(work_unit)
        else:
            thread.push(work_unit)

    # this method is magic - a thread previously allocated from the pool
    # releases itself back to it, then proceeds to allocate another thread,
    # possibly itself to keep processing while there are queued work units
    def _release(self, thread):
        self._threads.release(thread)
        self._push()

    # typecheck spec: an acceptable work unit callable is named "wu_*"
    valid_work_unit = lambda f: callable(f) and f.__name__.startswith("wu_")

    @typecheck
    def enqueue(self, request: Request, f: valid_work_unit, args: tuple, kwargs: dict):
        """
        Wrap f(*args, **kwargs) in a WorkUnit prioritized by the given
        request, submit it for execution and return it to the caller.
        """
        work_unit = WorkUnit(request, f, args, kwargs)
        self._push(work_unit)
        return work_unit
def _stop_thread_pools():
    # clean up and stop all the pools; delegates to the class-level registry
    # kept by RegisteredResourcePool, which knows every pool created so far
    RegisteredResourcePool.stop_pools()
def _start_thread_pools(thread_count: int, sweep_period: float):
    # initialize the entire thread pool machinery
    # NOTE(review): thread_count is accepted but never used here - pool sizes
    # appear to come from ResourceFactory instead; confirm whether the
    # parameter is kept only for interface compatibility
    RegisteredResourcePool.start_pools(sweep_period)
def __init__(self, name: str, size: int):
    # backing pool of PooledThread workers, `size` of them at most; each
    # thread receives the _release callback to hand itself back to the pool
    self._threads = RegisteredResourcePool(name, lambda name: PooledThread(name, self._release), size)
    # priority queue holding work units for which no thread was free
    self._queue = InterlockedPriorityQueue()
# self-test: WorkUnit ordering - units compare by their request's priority,
# and an InfiniteRequest sorts after any finite one
wu1 = WorkUnit(fake_request(1.0), wu_skip, (), {})
wu2 = WorkUnit(fake_request(2.0), wu_skip, (), {})
wuX = WorkUnit(InfiniteRequest(), wu_skip, (), {})
assert wu1 == wu1 and wu1 < wu2 and wu2 == wu2 and wu2 < wuX and wuX == wuX
assert (wu1 < wu2 < wuX) and (wu1 <= wu2 <= wuX)
assert (wuX > wu2 > wu1) and (wuX >= wu2 >= wu1)
assert wu1 != wu2 and wu2 != wuX and wu1 != wuX
print("ok")

###################################

print("single work unit: ", end = "")

# start the pool machinery with a 0.5 second sweep period for the test
RegisteredResourcePool.start_pools(0.5)
try:
    rq = fake_request(0.5)
    tp = ThreadPool("TP", 1)
    # a plain lambda is not a valid work unit (its name is not "wu_*"),
    # so typecheck must reject it
    with expected(InputParameterError("enqueue() has got an incompatible value for f: ")):
        tp.enqueue(rq, lambda: None, (), {})
    # a valid work unit passes its args/kwargs through unchanged
    r = tp.enqueue(rq, wu_loopback, ("foo", "bar"), {"biz": "baz"}).wait()
    assert r == (("foo", "bar"), { "biz": "baz" })
    def wu_get_thread_name():
        return current_thread().name