Example #1
0
class ThreadPool:
    """Pool of worker threads fed from an interlocked priority queue.

    Work units are enqueued via enqueue(); _push() tries to hand the
    highest-priority queued unit to a free pooled thread, and each
    thread re-triggers dispatch through _release() when it finishes.
    """

    @typecheck
    def __init__(self, name: str, size: int):
        # each PooledThread is given _release as its completion callback,
        # which is how finished threads return themselves to the pool
        self._threads = RegisteredResourcePool(
            name, lambda name: PooledThread(name, self._release), size)
        self._queue = InterlockedPriorityQueue()

    name = property(lambda self: self._threads.name)
    size = property(lambda self: self._threads.size)
    free = property(lambda self: self._threads.free)
    busy = property(lambda self: self._threads.busy)
    over = property(lambda self: len(self._queue))  # units queued, not yet running

    def _push(self, work_unit=None):
        """Queue *work_unit* (if given) and attempt one dispatch.

        Pops the highest-priority unit with a non-blocking pop; if no
        pooled thread can be allocated the unit is requeued and will be
        picked up later by a releasing thread via _release().
        """
        # FIX: compare with None explicitly -- the previous truthiness test
        # (`if work_unit:`) would silently drop a unit whose __bool__ or
        # __len__ evaluates falsy, inconsistent with the `is None` test below
        if work_unit is not None:
            self._queue.push(work_unit)
        work_unit = self._queue.pop(0.0)  # 0.0 timeout: do not block
        if work_unit is None:
            return  # nothing queued
        try:
            thread = self._threads.allocate()
        except (ResourcePoolEmpty, ResourcePoolStopped):
            # no free thread right now -- put the unit back; the next
            # _release() will retry the dispatch
            self._queue.push(work_unit)
        else:
            thread.push(work_unit)

    # this method is magic - a thread previously allocated from the pool
    # releases itself back to it, then proceeds to allocate another thread,
    # possibly itself to keep processing while there are queued work units

    def _release(self, thread):
        self._threads.release(thread)
        self._push()

    # predicate consumed by @typecheck: only callables named wu_* qualify
    valid_work_unit = lambda f: callable(f) and f.__name__.startswith("wu_")

    @typecheck
    def enqueue(self, request: Request, f: valid_work_unit, args: tuple,
                kwargs: dict):
        """Wrap f(*args, **kwargs) in a WorkUnit, schedule it, return it."""
        work_unit = WorkUnit(request, f, args, kwargs)
        self._push(work_unit)
        return work_unit
Example #2
0
class ThreadPool:
    """A named pool of worker threads draining a priority queue of work units."""

    @typecheck
    def __init__(self, name: str, size: int):
        # workers are built on demand; each receives _release as the
        # callback it invokes when it finishes a work unit
        self._threads = RegisteredResourcePool(
            name,
            lambda name: PooledThread(name, self._release),
            size,
        )
        self._queue = InterlockedPriorityQueue()

    @property
    def name(self):
        return self._threads.name

    @property
    def size(self):
        return self._threads.size

    @property
    def free(self):
        return self._threads.free

    @property
    def busy(self):
        return self._threads.busy

    @property
    def over(self):
        return len(self._queue)

    def _push(self, work_unit=None):
        """Queue *work_unit* if supplied, then try to dispatch one unit."""
        if work_unit:
            self._queue.push(work_unit)
        pending = self._queue.pop(0.0)  # non-blocking pop
        if pending is None:
            return
        try:
            worker = self._threads.allocate()
        except (ResourcePoolEmpty, ResourcePoolStopped):
            # no worker free -- requeue and let a releasing thread retry
            self._queue.push(pending)
        else:
            worker.push(pending)

    # this method is magic - a thread previously allocated from the pool
    # releases itself back to it, then proceeds to allocate another thread,
    # possibly itself to keep processing while there are queued work units

    def _release(self, thread):
        self._threads.release(thread)
        self._push()

    def valid_work_unit(f):
        # @typecheck predicate: accept only callables named wu_*
        return callable(f) and f.__name__.startswith("wu_")

    @typecheck
    def enqueue(self, request: Request, f: valid_work_unit, args: tuple, kwargs: dict):
        """Wrap the call in a WorkUnit, schedule it and return it to the caller."""
        unit = WorkUnit(request, f, args, kwargs)
        self._push(unit)
        return unit
Example #3
0
 def __init__(self, name: str, size: int):
     """Create the named worker pool of *size* threads and its work queue."""
     # factory handed to the pool; each worker calls back into _release
     make_worker = lambda worker_name: PooledThread(worker_name, self._release)
     self._threads = RegisteredResourcePool(name, make_worker, size)
     self._queue = InterlockedPriorityQueue()
Example #4
0
 def __init__(self, name: str, size: int):
     """Create a named pool of *size* worker threads plus its work queue."""
     # pool of PooledThread workers; each is constructed with self._release
     # as the callback it invokes when done with a work unit
     self._threads = RegisteredResourcePool(
         name, lambda name: PooledThread(name, self._release), size)
     # priority queue of work units awaiting a free thread
     self._queue = InterlockedPriorityQueue()