Code Example #1
 def queue (manager, work_func):
     with manager.lock_:
         job = worker_job(manager, work_func)
         manager.queue_.append(job)
         dmsg('queued job {}', job)
         manager.cond_.notify()
     return job
Code Example #2
 def _update_no_data (self, offset):
     bx, blk = self.locate_block(offset)
     dmsg('no data at 0x{:X} => got block {!r}', offset, blk)
     if blk.kind in (SCK_CACHED, SCK_UNCACHED):
         bx, blk = self._split_block(bx, offset)
         self._discard_contiguous_data_blocks(bx)
Code Example #3
 def _update_data (self, offset, data):
     dmsg('updating o=0x{:X} len=0x{:X}', offset, len(data))
     while data:
         bx, b = self.locate_block(offset)
         dmsg('ofs=0x{:X} len=0x{:X}. got block: {}', offset, len(data), b.desc())
         if b.kind == SCK_HOLE:
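             # the write falls inside a hole: cover any gap between the hole's start
             # and the write offset with an uncached block, insert the new cached
             # block, then move the hole's start past the written range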
             if offset > b.offset:
                 self.blocks.insert(bx, uncached_data_block(b.offset, offset - b.offset) )
                 bx += 1
             self.blocks.insert(bx, cached_data_block(offset, bytearray(data)))
             self._merge_left(bx)
             b.offset = offset + len(data)
             return
         elif b.kind == SCK_UNCACHED:
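             # the write overlaps an uncached block: split it into an optional leading
             # uncached piece, the newly cached slice, and an optional trailing
             # uncached piece, then re-merge with the neighbouring blocks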
             new_blocks = []
             b_end = b.offset + b.size
             if b.offset < offset:
                 new_blocks.append(uncached_data_block(b.offset, offset - b.offset))
             nb_len = min(b_end - offset, len(data))
             new_blocks.append(cached_data_block(offset, bytearray(data[0: nb_len])))
             data_end = offset + len(data)
             if data_end < b_end:
                 new_blocks.append(uncached_data_block(data_end, b_end - data_end))
             self.blocks[bx : bx + 1] = new_blocks
             self._merge_around(bx, len(new_blocks))
             offset += nb_len
             data = data[nb_len:]
         elif b.kind == SCK_CACHED:
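             # the write overlaps an already cached block: overwrite the overlapping
             # bytes in place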
             b_end = b.offset + len(b.data)
             update_len = min(b_end - offset, len(data))
             b.data[offset - b.offset : offset - b.offset + update_len] = data[0 : update_len]
             offset += update_len
             data = data[update_len:]
         else:
             raise RuntimeError(sfmt('unexpected block kind: {!r}', b))
Code Example #4
 def get_part (self, offset, size):
     '''
     returns information from the cache about the data starting at the given offset.
     The information returned may describe a smaller portion than the requested
     size, but never more; the caller must call again to get information about
     the remaining data.
     '''
     if size < 0:
         raise ValueError('negative size: {}'.format(size))
     if offset < 0:
         return hole_block(offset, min(size, -offset))
     bx, b = self.locate_block(offset)
     dmsg('offset 0x{:X} -> bx={} b={!r}', offset, bx, b)
     if b.kind == SCK_UNCACHED:
         assert b.offset <= offset and offset - b.offset < b.size
         return uncached_data_block(offset, min(size, b.offset + b.size - offset))
     elif b.kind == SCK_CACHED:
         b_size = b.get_size()
         assert b.offset <= offset and offset - b.offset < b_size
         n = min(size, b.offset + b_size - offset)
         o = offset - b.offset
         return cached_data_block(offset, b.data[o : o + n])
     elif b.kind == SCK_HOLE:
         assert b.offset <= offset
         assert b.size == 0 or offset - b.offset < b.size, repr((self, b, offset))
         if b.size == 0:
             return hole_block(offset, 0)
         else:
             return hole_block(offset, min(size, b.offset + b.size - offset))
     else:
         return b
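
As the docstring above notes, get_part may return a block covering only part of the requested range, so callers are expected to loop until the range is covered. A minimal sketch of that loop follows, assuming a cache object exposing get_part as above; collect_parts itself is a hypothetical helper, not part of the original code:

def collect_parts (cache, offset, size):
    # walk the requested range one block at a time; get_part never describes
    # more than the remaining size
    parts = []
    while size > 0:
        b = cache.get_part(offset, size)
        parts.append(b)
        n = len(b.data) if b.kind == SCK_CACHED else b.size
        if n == 0:
            break               # zero-sized hole: nothing more is known past here
        offset += n
        size -= n
    return parts
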
Code Example #5
 def _add_worker (manager):
     with manager.lock_:
         worker_id = len(manager.workers_)
         dmsg('adding worker {}...', worker_id)
         w = threading.Thread(
                 target = manager._worker,
                 kwargs = dict(worker_id = worker_id))
         manager.workers_.append(w)
     w.start()
Code Example #6
 def shutdown (manager):
     with manager.lock_:
         if not manager.up_: return
         manager.up_ = False
         dmsg('notifying workers to exit...')
         manager.cond_.notify_all()
     for w in manager.workers_:
         w.join()
     dmsg('done shutting down')
Code Example #7
 def _load (self, o, e):
     self._seek(o)
     while o < e:
         data = self.stream.read(e - o)
         dmsg('got 0x{:X} bytes', len(data) if data else 0)
         if not data:
             self._update_no_data(o)
             break
         self._update_data(o, data)
         o += len(data)
Code Example #8
 def work_ (self):
     dmsg('start work')
     if self.delay: time.sleep(self.delay)
     while True:
         size = 0
         with self.lock:
             if not self.load_queue or not self.server.up: return
             offset, size = self.load_queue.pop(0)
         if size:
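             # perform the blocking load outside the lock so other threads can keep
             # queuing requests while this one is fetched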
             dmsg('loading o={:X} s={:X}', offset, size)
             self.source.load(offset, size)
             self.updated = True
Code Example #9
 def __init__ (self, init_worker_count = 4, max_worker_count = 16):
     object.__init__(self)
     self.free_worker_count = 0
     self.max_worker_count = max_worker_count
     self.stream_queue = []
     self.lock = threading.Lock()
     self.cond = threading.Condition(self.lock)
     self.up = True
     dmsg('stream_cache_server initing {} workers...', init_worker_count)
     self.workers = [threading.Thread(target = self.worker) for i in range(init_worker_count)]
     for worker in self.workers:
         worker.start()
Code Example #10
 def queue_load_ (self, offset, size):
     if size == 0: return
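     # widen the request to the source's power-of-two alignment boundaries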
     o = zlx.int.pow2_round_down(offset, self.source.align)
     e = zlx.int.pow2_round_up(offset + size, self.source.align)
     with self.lock:
         self.updated = False
         req = stream_cache_load_request(o, e - o)
         if req in self.load_queue:
             dmsg('load request: {!r} already queued', req)
         else:
             dmsg('append load request: {!r}', req)
             self.load_queue.append(req)
             self.server.queue_stream_(self)
Code Example #11
 def queue_stream_ (self, scp):
     '''
     adds to the queue a stream_cache_proxy
     '''
     with self.lock:
         if scp.queued:
             dmsg('stream already queued')
             return
         dmsg('stream queued')
         self.stream_queue.append(scp)
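         # grow the worker pool only when every worker is busy and the cap allows it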
         if self.free_worker_count == 0 and len(self.workers) < self.max_worker_count:
             self.add_worker_()
         scp.queued = True
         self.cond.notify()
Code Example #12
 def _worker (manager, worker_id):
     dmsg('worker {} starting...', worker_id)
     with manager.lock_:
         while manager.up_:
             while manager.queue_:
                 job = manager.queue_.pop(0)
                 dmsg('worker {} executing job {}', worker_id, job)
                 manager._exec_while_locked(job)
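             # wait() releases the lock while blocked and re-acquires it before
             # returning, so queue() and shutdown() can take it and notify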
             manager.cond_.wait()
             dmsg('worker {} awake', worker_id)
         dmsg('worker {} exiting...', worker_id)
Code Example #13
 def worker (self):
     dmsg('worker')
     while True:
         scp = None
         with self.lock:
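             # advertise this worker as idle; queue_stream_ checks this count to
             # decide whether another worker must be spawned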
             self.free_worker_count += 1
             while not self.stream_queue and self.up:
                 self.cond.wait()
             if not self.up:
                 dmsg('exiting worker...')
                 return
             scp = self.stream_queue.pop(0)
             self.free_worker_count -= 1
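         # keep servicing this stream until its load queue is drained, then return
         # to the shared stream queue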
         while scp:
             scp.work_()
             with self.lock:
                 if not scp.load_queue:
                     scp.queued = False
                     scp = None
Code Example #14
def self_test ():
    import time
    dmsg('running zlx.mth.self_test()...')
    try:
        wm = worker_manager(
                init_worker_count = 2,
                max_worker_count = 4)

        j123 = wm.queue(lambda: 123)
        jboom = wm.queue(boom)

        j123.wait()
        dmsg('j123: {}', j123.get_state())
        assert j123.get_state() == COMPLETE
        assert j123.result == 123
        dmsg('* complete simple job: passed')

        jboom.wait()
        assert jboom.get_state() == FAILED
        assert isinstance(jboom.error, RuntimeError)
        dmsg('* raise exception job: passed')

        while True:
            j = wm.queue(lambda: 234)
            if j.cancel() == CANCELLED: break
        dmsg('* cancelling job: passed')

        while True:
            j = wm.queue(slow_func)
            s = QUEUED
            while s == QUEUED:
                s = j.get_state()
                dmsg('state = {}', s)
                time.sleep(0.001)
            s = j.cancel()
            dmsg('cancelling running job: {}', s)
            if s == RUNNING:
                j.wait()
                break
        dmsg('* attempt to cancel running job: passed')

    finally:
        wm.shutdown()
    return
Code Example #15
 def load (self, offset, size):
     o = zlx.int.pow2_round_down(offset, self.align)
     e = zlx.int.pow2_round_up(offset + size, self.align)
     self._load(o, e)
     dmsg('load o={:X} e={:X} => {!r}', o, e, self)