class JoinableQueue(Queue):
    """Queue subclass that tracks unfinished tasks so that consumers can
    call task_done() per item and producers can join() until every queued
    item has been processed (multiprocessing-style JoinableQueue)."""

    def __init__(self, maxsize=0):
        Queue.__init__(self, maxsize)
        # Counting semaphore holding the number of items put() but not yet
        # marked done via task_done().
        self._unfinished_tasks = Semaphore(0)
        # Wakes join()ers when the count reaches zero.
        self._cond = Condition()

    def __getstate__(self):
        # Extend the base state so the extra synchronisation primitives
        # survive pickling across process boundaries.
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, item, block=True, timeout=None):
        Queue.put(self, item, block, timeout)
        # One more task outstanding.
        self._unfinished_tasks.release()

    def task_done(self):
        self._cond.acquire()
        try:
            # Non-blocking acquire fails when the counter is already zero,
            # i.e. task_done() was called more times than put().
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            # Last outstanding task just finished: wake every join()er.
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()
        finally:
            self._cond.release()

    def join(self):
        # Block until the unfinished-task counter is zero.  A single wait()
        # (rather than a loop) matches the stock implementation: notify_all()
        # is only issued when the counter is exactly zero.
        self._cond.acquire()
        try:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
        finally:
            self._cond.release()
def __enter__(self):
    """Enter the parallel context: allocate shared synchronisation state,
    fork the worker processes, and start monitoring on the master.

    Returns self so the object can be used as a ``with`` target.
    """
    self.critical = Semaphore(1)
    self._errormon = ErrorMonitor()
    self._slavemon = SlaveMonitor(self._errormon)
    # One shared integer per synchronisation primitive, allocated in shared
    # memory so every forked worker sees the same counters.
    shared = sharedmem.empty((),
        dtype=[
            ('ordered', 'intp'),
            ('barrier', 'intp'),
            ('dynamic', 'intp'),
        ])
    self._barrier = Barrier(self.num_threads, shared['barrier'][...])
    self._Ordered = MetaOrdered(self, shared['ordered'][...], Semaphore(1))
    self._StaticForLoop = MetaStaticForLoop(self)
    self._DynamicForLoop = MetaDynamicForLoop(self, Semaphore(1), shared['dynamic'][...])
    # Let each registered variable prepare shared storage before the fork,
    # then fix itself up in every process afterwards.
    for param in self._variables:
        param.beforefork(self)
    self._fork()
    for param in self._variables:
        param.afterfork(self)
    if self.master:
        # Only the master runs the error/slave monitors and listens for
        # long jumps (aborts) raised by workers.
        self._errormon.start()
        self._slavemon.start()
        LongJump.listen(self)
    return self
def __init__(self, n, count):
    """Prepare a reusable two-phase barrier for ``n`` participants.

    ``count`` is an externally supplied shared counter (array-like,
    assigned through ``count[...]``); it is reset to zero here.
    """
    # Mutex guarding the counter, plus the two turnstiles of the classic
    # reusable-barrier construction.
    self.mutex = Semaphore(1)
    self.turnstile = Semaphore(0)
    self.turnstile2 = Semaphore(0)
    self.n = n
    self.count = count
    self.count[...] = 0
def testCallbackOnApiTimeout(self):
    """Verify that when a solicited asynchronous API call times out, the
    registered callback receives a TransactionFailed result and the
    transaction id can no longer be acquired."""
    eResult = Result()
    self.apiHead.parent.eResult = eResult
    self.ipcHead.connect()
    self.ipcNeck.connect()
    self.mr.returnExact = True
    args = (0, 1, 2, 3)
    kwargs = {"four":4, "five":5}
    api = self.apiHead.parent.method_c(*args, **kwargs)
    cbData = []
    # Released by the callback so the test can block until it has fired.
    cbReceived = Semaphore(0)
    def cb(tId, data):
        cbData.append((tId, data))
        cbReceived.release()
    # Make the call asynchronous:
    api.sync = eSync.ASYNCHRONOUS
    api.callback = cb
    api.solicited = True
    api.timeout = 1
    asyncResult = api()
    assert isinstance(asyncResult, iAsyncResult)
    assert asyncResult
    tId = asyncResult.tId()
    assert tId
    # Now wait for the callback to have been called:
    assert cbReceived.acquire(timeout=5)
    assert cbData[0][0] == tId
    # The timeout must surface as a TransactionFailed instance.
    assert isinstance(cbData[0][1], TransactionFailed)
    # The semaphore should NOT be acquirable:
    try:
        self.ipcHead.getTransactionManager().acquireNew(tId)
    except TypeError, _e:
        assert True
class MyTransport(iIpcTransport):
    """Capture-only transport for tests: records everything sent instead of
    writing to a real IPC channel, and signals ``dataSentLock`` per send so
    a test can block until at least one payload has been captured."""

    def __init__(self, *args, **kwargs):
        # Captured (transactionId, result) pairs, oldest first.
        self.dataSent = []
        # Counts captured payloads; starts at zero so acquire() blocks
        # until sendData() has been called at least once.
        self.dataSentLock = Semaphore(0)

    def sendData(self, result, transactionId):
        self.dataSent.append((transactionId, result))
        self.dataSentLock.release()
def _build_single_scenario_proc(clean: bool, allow_offset_map: bool, scenario: str, semaphore: synchronize.Semaphore):
    """Worker-process entry point: build one scenario while holding
    ``semaphore`` to bound the number of concurrent builds.

    :param clean: remove previously generated artifacts first.
    :param allow_offset_map: permit generation of the offset map.
    :param scenario: path/name of the scenario to build.
    :param semaphore: concurrency limiter shared across worker processes.
    """
    # The semaphore is a context manager: it acquires on entry and is
    # guaranteed to release on exit, replacing the original manual
    # acquire/try/finally/release sequence with the idiomatic form.
    with semaphore:
        _build_single_scenario(clean, allow_offset_map, scenario)
def _startQReader(self):
    """Start the queue-reader thread if it is not already running, and
    block until the thread has actually begun executing.

    Fixes the non-idiomatic ``self.qReader==None`` comparison: ``None``
    checks use identity (``is None``), per PEP 8.
    """
    if self.qReader is None:
        self.terminateQReader = False
        # Released by the reader thread once it is running, so this method
        # does not return before the thread has started.
        startMutex = Semaphore(0)
        self.qReader = threading.Thread(
            target=self.run,
            args=[startMutex,
                  self._getLogger("QReader.thread", self.loggingLevel)])
        self.qReader.setName("qReader")
        self.qReader.setDaemon(True)
        self.qReader.start()
        startMutex.acquire()
class ServerSink(iMockDebuggerSink):
    """Debugger sink that forwards every received debugger-interface call to
    a remote peer over a QueueWriter.

    Calls to any method listed by iMockDebuggerSink are intercepted in
    __getattribute__ and enqueued as (name, args, kwargs); a background
    thread drains the queue and ships the payloads to the remote end."""
    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        # Names of the debugger-interface methods that must be proxied.
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        # Released by run() once the forwarding thread is live.
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.quiet= quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        # Writer that ships queued call tuples to the 'details' target.
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None
    def start(self):
        """Launch the forwarding thread; returns a status string."""
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("ServerSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "server.sink.started"
    def close(self):
        """Best-effort teardown: stop the thread, close writer and queue."""
        self._terminate = True
        try:
            self.thread.join()
        except:
            pass
        try:
            self._qw.close()
        except:
            pass
        try:
            self._q.close()
        except:
            pass
        return "server.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        # Block until run() has released the start mutex; returns self for
        # chaining.
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        # Intercept debugger-interface methods: instead of executing them,
        # return a wrapper that enqueues the call for the drain thread.
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(self, *args, **kwargs):
                # Fail fast if the payload cannot be pickled.
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        """Drain loop: signal start, then forward queued calls until
        close() sets _terminate or the writer fails."""
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:
                pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception, _e:
                    break
class MyTransactionManager(TransactionManager):
    """Instrumented TransactionManager for tests: remembers the last
    transaction id handed out and signals ``sem`` each time a transaction
    is released, recording the (tId, result) pairs."""

    def __init__(self):
        super(MyTransactionManager, self).__init__(StandardTransactionGenerator())
        self.sem = Semaphore(0)   # released once per release() call
        self.result = []          # accumulated (tId, result) pairs
        self.lastTid = None       # most recent id returned by next()

    def next(self):
        # Delegate to the base class, remembering what was handed out.
        tId = super(MyTransactionManager, self).next()
        self.lastTid = tId
        return tId

    def release(self, tId, result):
        # Signal first so a waiting test wakes up, then record and forward.
        self.sem.release()
        self.result.append((tId, result))
        super(MyTransactionManager, self).release(tId, result)
class JoinableQueue(Queue):
    """Queue with task accounting (reconstructed from decompiled source).

    put() increments ``_unfinished_tasks``; consumers call task_done() per
    item and join() blocks until the count drops to zero.
    """

    def __init__(self, maxsize = 0):
        Queue.__init__(self, maxsize)
        # Counts items put() but not yet task_done()'d.
        self._unfinished_tasks = Semaphore(0)
        self._cond = Condition()

    def __getstate__(self):
        # Include the extra primitives so they survive pickling.
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block = True, timeout = None):
        # The decompiled original read
        #   ``raise self._sem.acquire(block, timeout) or Full``
        # which raises even when the acquire SUCCEEDS (``raise True`` ->
        # TypeError).  Restore the intended conditional Full, matching the
        # stock multiprocessing implementation.
        if self._closed:
            raise AssertionError
        if not self._sem.acquire(block, timeout):
            raise Full
        self._notempty.acquire()
        self._cond.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()
        finally:
            self._cond.release()
            self._notempty.release()

    def task_done(self):
        self._cond.acquire()
        try:
            # Fails when the counter is already zero, i.e. task_done() was
            # called more times than put().
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            # Last outstanding task finished: wake every join()er.
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()
        finally:
            self._cond.release()

    def join(self):
        # Single wait() is safe: notify_all() only fires at exactly zero.
        self._cond.acquire()
        try:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
        finally:
            self._cond.release()
class Item(object):
    """A result slot: a producer stores values via result() and a consumer
    blocks in acquire() until at least one value is available, then drains
    them in arrival order with getResult()."""

    def __init__(self, i):
        self._sem = Semaphore(0)   # released once per stored result
        self._result = []          # FIFO of delivered results
        self._i = i

    def acquire(self, timeout=None):
        # Block (up to timeout) until result() has been called; returns the
        # semaphore's success flag.
        return self._sem.acquire(block=True, timeout=timeout)

    def result(self, result):
        self._result.append(result)
        # Now the result is complete, release it:
        self._sem.release()

    def getResult(self):
        # Pop in arrival order.
        return self._result.pop(0)
def _start(self):
    """Lazily bootstrap the SSH subprocess and its reader thread.

    Safe to call repeatedly: returns immediately when the process already
    exists.  On any failure the object is torn down via terminate().
    """
    with self._startLock:
        self._synchroniser.checkDestroyed()
        if self._process!=None:
            # Already started.
            return
        try:
            # The bootstrap releases this semaphore once the child process
            # is up, so we block below until it is ready.
            sem = Semaphore(0)
            self._process = bootstrapSshProcess(self._target, self._qRx, self._qTx, sem)
            sem.acquire()
            # Now start the read thread:
            t = threading.Thread(target=self._rx)
            t.setName("SSHRebooter_%(T)s"%{"T":self._target.uId()})
            t.setDaemon(True)
            t.start()
            self._threads["rx"] = t
        except Exception, _e:
            # Cleanup.
            self.terminate()
class SshClientFactory(object):
    """Twisted-based SSH client wrapper.

    Construction blocks until the SSH service has started (serviceStarted()
    releases ``self.sem``).  Received data is handed to ``dataQ`` — a
    callable; defaults to a stderr-warning sink when None.
    """
    # Standard SSH port used when the caller passes port=None.
    DEFAULT_PORT = 22
    def __init__(self, host, port, username, password, dataQ=None, exitCmd="exit", sshType=SshType.STB, postConnectionTimeout=0):
        self.username = username
        self.password = password
        self.exitCmd = exitCmd
        self.postConnectionTimeout = postConnectionTimeout
        self.host = host
        self.sshType = sshType
        if port==None:
            port = SshClientFactory.DEFAULT_PORT
        self.port = int(port)
        if dataQ==None:
            # Default sink: warn that data arrived with no consumer.
            def dummyPut(*args, **kwargs):
                sys.stderr.write("SshClientFactory consumed data!\n")
            dataQ = dummyPut
        self.dataQ = dataQ
        #
        self.isClosing = False
        self.lastSendTime = time.time()
        #
        self.connection = None
        # run() blocks until the service start has been signalled.
        self.run()
    def run(self):
        """Schedule the TCP connect on the reactor thread and block until
        serviceStarted() releases the semaphore."""
        sys.stderr.write("SshClientFactory running!\n")
        self.factory = SSHFactory(self)
        # Released by serviceStarted() once the SSH service is up.
        self.sem = Semaphore(0)
        def _connectLater():
            sys.stderr.write("SshClientFactory connecting asynchronously\n")
            reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable
            sys.stderr.write("SshClientFactory connected\n")
        # Reactor APIs must be driven from the reactor thread.
        threads.blockingCallFromThread(reactor, _connectLater) #@UndefinedVariable
        self.sem.acquire()
    def send(self, data, tId, timeout=None):
        """Open a channel for 'data' on the reactor thread."""
        client = ClientData(self, cmd=data, tId=tId)
        reactor.callFromThread(self.connection.openAChannel, client) #@UndefinedVariable
    def terminate(self):
        """Stop the reactor (and thus the connection) if it is running."""
        self.isClosing = True
        if reactor.running: #@UndefinedVariable
            reactor.stop() #@UndefinedVariable
    def isOk(self):
        # Healthy iff the reactor is still running.
        return reactor.running #@UndefinedVariable
    def serviceStarted(self):
        # Unblock run(): the SSH service is ready.
        self.sem.release()
def __init__(self, q, parent):
    """Create the worker thread (does not start it).

    :param q: queue of work items consumed by the thread.
    :param parent: owning object used for callbacks/context.
    """
    super(ApiAsyncWorker, self).__init__()
    self._q = q
    self._parent = parent
    # Unique, monotonically numbered thread name.
    self._name = "ApiAsyncWorker_%(C)s" % {"C":ApiAsyncWorker.ID.next()}
    self._logger = LogManager().getLogger(self._name)
    self.setDaemon(True)
    self.setName(self._name)
    # Cooperative cancellation flag checked by the work loop.
    self._cancel = False
    # Released by run() once the thread is actually executing; see
    # waitUntilRunning().
    self._runLock = Semaphore(0)
def run(self):
    """Kick off the asynchronous TCP connect on the reactor thread and
    block until serviceStarted() signals that the SSH service is up."""
    sys.stderr.write("SshClientFactory running!\n")
    self.factory = SSHFactory(self)
    # Released by serviceStarted().
    self.sem = Semaphore(0)
    def _connectLater():
        sys.stderr.write("SshClientFactory connecting asynchronously\n")
        reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable
        sys.stderr.write("SshClientFactory connected\n")
    # Reactor APIs must be driven from the reactor thread.
    threads.blockingCallFromThread(reactor, _connectLater) #@UndefinedVariable
    self.sem.acquire()
class ApiAsyncWorker(threading.Thread):
    """Daemon worker thread that processes asynchronous API work.

    waitUntilRunning() blocks until run() has begun executing; stop()
    requests cooperative cancellation via the _cancel flag."""
    # Monotonically increasing worker number used in thread names.
    ID = itertools.count(1)
    @staticmethod
    def startAll(workers=[]):
        # NOTE(review): mutable default is harmless here — never mutated.
        # Start every worker first, then wait for each to be running.
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.waitUntilRunning()
    @staticmethod
    def create(q, parent, start=False):
        """Factory: build a worker; optionally start it and wait until it
        is running."""
        worker = ApiAsyncWorker(q, parent)
        if start:
            ApiAsyncWorker.startAll([worker])
        return worker
    def __init__(self, q, parent):
        super(ApiAsyncWorker, self).__init__()
        self._q = q
        self._parent = parent
        self._name = "ApiAsyncWorker_%(C)s" % {"C":ApiAsyncWorker.ID.next()}
        self._logger = LogManager().getLogger(self._name)
        self.setDaemon(True)
        self.setName(self._name)
        self._cancel = False
        # Released once run() is executing; see waitUntilRunning().
        self._runLock = Semaphore(0)
    def stop(self):
        # Cooperative cancellation; checked by the work loop in _work().
        self._cancel = True
    def waitUntilRunning(self, timeout=None):
        # Wait until we become running:
        return self._runLock.acquire(block=True, timeout=timeout)
    def run(self):
        self._logger.debug("Thread running...")
        # Signal waiters before doing any work.
        self._runLock.release()
        # Now do the work:
        try:
            self._work()
        except EOFError, _e:
            self._logger.warn("EOF in thread...")
        except Exception, _e:
            self._logger.exception("Unhandled error in thread:")
def __init__(self, peerName, theTime, filename, quiet):
    """Open *filename* and write the session header line.

    Fixes an AttributeError in the original: the file handle is stored as
    ``self._fp`` but was subsequently used as ``self.fp``.
    """
    self._peerName = peerName
    self._fp = open(filename, "w")
    # BUG FIX: was ``self.fp.write`` / ``self.fp.flush`` — the attribute
    # is named ``_fp``.
    self._fp.write("File debugger started at: %(T)s for client: %(C)s"%{"T":theTime, "C":peerName})
    self._fp.flush()
    # Debugger-interface methods to proxy (redundant [] pre-assignment
    # from the original removed).
    self._methods = iMockDebuggerSink()._getMethods()
    self._terminate = False
    self.quiet = quiet
    # Released by the worker thread once it is running.
    self._startMutex = Semaphore(0)
    self._q = Queue()
    self.thread = None
def __init__(self, peerName, theTime, details, quiet):
    """Set up the forwarding sink: queue, start-mutex and a QueueWriter
    connected to *details* for shipping intercepted calls."""
    self._peerName = peerName
    self._methods = []
    # Names of the debugger-interface methods that must be proxied.
    methods = iMockDebuggerSink()._getMethods()
    self._methods = methods
    self._terminate = False
    self._details = details
    self._qw = None
    # Released by the worker thread once it is running.
    self._startMutex = Semaphore(0)
    self._q = Queue()
    self.quiet= quiet
    self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
    # Writer that ships queued call tuples to the 'details' target.
    self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
    self._qw.start()
    self.thread = None
def init_predictor_process(self, semaphore: SemaphoreType, predictor_end: Connection):
    """Long-running prediction loop executed in a forked child process.

    Waits on *semaphore* for each incoming sample, accumulates parsed
    sensor data until a full sliding window is available, extracts the
    configured features and sends predicted labels back via
    *predictor_end*.
    """
    set_start_method("fork", force=True)
    self.data_set_manager.set_db(self.create_db())
    workspace_sensors = self.data_set_manager.get_workspace_sensors()
    sliding_window = self.data_set_manager.get_sliding_window()
    component_features = self.data_set_manager.get_component_features()
    column_index = self.data_set_manager.get_column_order()
    label_encoder = self.data_set_manager.get_label_encoder()
    pipeline = self.data_set_manager.get_pipeline()
    prediction_df = DataFrame()
    while True:
        # Block until the parent signals that a sample is ready.
        while not semaphore.acquire():
            pass
        data: SampleInPredict = predictor_end.recv()
        try:
            validate_sensor_data_points_in_predict(data, workspace_sensors)
        except ValueError:
            predictor_end.send(["Invalid data!"])
            # BUG FIX: the original fell through and parsed the invalid
            # sample anyway; skip it instead.
            continue
        parsed_df = parse_sensor_data_points_in_predict(
            data, workspace_sensors)
        prediction_df = pd.concat([prediction_df, parsed_df],
                                  ignore_index=True)
        if len(prediction_df.index) < sliding_window.window_size:
            # There are not enough data to do prediction
            continue
        data_windows = roll_data_frame(sliding_window, prediction_df)
        # We want to spare the leftover, so that we can count it in the
        # window when the next sample arrives
        leftover = sliding_window.window_size - sliding_window.sliding_step
        prediction_df = prediction_df.iloc[len(prediction_df.index) - leftover:, :]
        all_feature_dfs = []
        for sensor_component, features in component_features.items():
            all_feature_dfs += list(
                extract_features(data_windows[[sensor_component, "id"]],
                                 features).values())
        x = pd.concat(all_feature_dfs, axis=1)[Index(column_index)]
        predictions = pipeline.predict(x)
        translated_labels = list(
            label_encoder.inverse_transform(predictions))
        predictor_end.send(translated_labels)
class Barrier:
    """ Excerpt from the Semaphore book by Downey 08 """
    def __init__(self, n, count):
        self.n = n
        # Shared counter (array-like; written via count[...]) supplied by
        # the caller so forked processes observe the same value.
        self.count = count
        self.count[...] = 0
        self.mutex = Semaphore(1)       # protects count
        self.turnstile = Semaphore(0)   # gate for phase 1
        self.turnstile2 = Semaphore(0)  # gate for phase 2
    def abort(self):
        """ ensure the master exit from Barrier """
        # NOTE(review): releases the mutex twice as well as both turnstiles,
        # presumably to unblock a master stuck in either phase — confirm.
        self.mutex.release()
        self.turnstile.release()
        self.mutex.release()
        self.turnstile2.release()
    def phase1(self):
        # Arrival phase: the last participant to arrive opens the first
        # turnstile n times so everyone passes.
        try:
            self.mutex.acquire()
            self.count[...] += 1
            if self.count == self.n:
                [self.turnstile.release() for i in range(self.n)]
        finally:
            self.mutex.release()
        self.turnstile.acquire()
    def phase2(self):
        # Departure phase: the last participant to leave opens the second
        # turnstile n times, making the barrier reusable.
        try:
            self.mutex.acquire()
            self.count[...] -= 1
            if self.count == 0:
                [self.turnstile2.release() for i in range(self.n)]
        finally:
            self.mutex.release()
        self.turnstile2.acquire()
    def wait(self):
        # Block until all n participants have arrived; trivially returns
        # for a zero-participant barrier.
        if self.n == 0: return
        self.phase1()
        self.phase2()
def __init__(self, maxsize = 0):
    """Initialise the joinable queue with its task-tracking primitives."""
    # Base queue setup first.
    Queue.__init__(self, maxsize)
    # Condition used to wake join()ers, plus a counting semaphore holding
    # the number of items not yet marked done.
    self._cond = Condition()
    self._unfinished_tasks = Semaphore(0)
def __init__(self):
    """Create the instrumented transaction manager with a standard id
    generator."""
    super(MyTransactionManager, self).__init__(StandardTransactionGenerator())
    # Most recent transaction id handed out by next().
    self.lastTid = None
    # Released once per release() so tests can wait for completions.
    self.sem = Semaphore(0)
    # Accumulated (tId, result) pairs.
    self.result = []
def __init__(self, *args, **kwargs):
    """Capture-only transport constructor: ignores *args*/*kwargs* and
    prepares the capture list plus its signalling semaphore."""
    # Recorded (transactionId, result) pairs.
    self.dataSent = []
    # Counts captured payloads; acquire() blocks until something was sent.
    self.dataSentLock = Semaphore(0)
def Semaphore(value=1):
    """Factory returning a ``multiprocessing.synchronize.Semaphore``.

    The import happens lazily at call time; the aliased name avoids
    shadowing confusion with this factory itself.
    """
    from multiprocessing.synchronize import Semaphore as _Semaphore
    return _Semaphore(value)
class FileSink(iMockDebuggerSink):
    """Debugger sink that appends every intercepted debugger-interface call
    to a log file via a background drain thread.

    Fixes from the original: the session header was written through
    ``self.fp`` although the handle is stored as ``self._fp``
    (AttributeError), and close() closed ``self._fp`` twice.
    """
    def __init__(self, peerName, theTime, filename, quiet):
        self._peerName = peerName
        self._fp = open(filename, "w")
        # BUG FIX: was self.fp.write(...) / self.fp.flush().
        self._fp.write("File debugger started at: %(T)s for client: %(C)s"%{"T":theTime, "C":peerName})
        self._fp.flush()
        # Debugger-interface methods that must be proxied.
        self._methods = iMockDebuggerSink()._getMethods()
        self._terminate = False
        self.quiet = quiet
        # Released by run() once the drain thread is live.
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.thread = None
    def start(self):
        """Launch the drain thread; returns a status string."""
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("FileSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "file.sink.started"
    def close(self):
        """Best-effort teardown: stop the thread, close file and queue
        (the original closed the file twice; once is enough)."""
        self._terminate = True
        try:
            self.thread.join()
        except Exception:
            pass
        try:
            self._fp.close()
        except Exception:
            pass
        try:
            self._q.close()
        except Exception:
            pass
        self._fp = None
        return "file.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        # Block until run() has signalled via the start mutex.
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        # Intercept debugger-interface methods and enqueue the call instead
        # of executing it.
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(self, *args, **kwargs):
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        """Drain loop: format each queued call and append it to the file."""
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:
                pass
            else:
                try:
                    (methodName, args, kwargs) = data
                    peerName = args[0]
                    relativeTime = args[1]
                    args = args[2:]
                    ss = ["PEER:", peerName, "REL-TIME:", relativeTime,
                          "METHOD", methodName, "ARGS:", str(args),
                          "KWARGS", str(kwargs)]
                    s = "\n".join(ss)
                except Exception:
                    # Malformed payload: drop it, keep draining.
                    pass
                else:
                    try:
                        self._fp.write(s)
                    except Exception:
                        break
def __init__(self, i):
    """Store the item's index and prepare the result slot."""
    self._i = i
    # Released when a result is appended; consumers block on it.
    self._sem = Semaphore(0)
    self._result = []
def Semaphore(value=1):
    """
    Returns a semaphore object (multiprocessing-backed), importing the
    implementation lazily at call time.
    """
    import multiprocessing.synchronize as _sync
    return _sync.Semaphore(value)
self.ipcHead.connect() self.ipcNeck.connect() args = (0, 1, 2, 3) kwargs = {"four":4, "five":5} api = self.apiNeck.parent.method_b(*args, **kwargs) # Test the API is unsupported first: try: _result = api() except UnsupportedApiError, _e: assert True else: assert False # Now test the api is supported after we add out catchall handler: self.logger.warn("let's play...") eResult = "hello.world!" caught = Semaphore(0) def headCatchallHandler(tId, *args, **kwargs): caught.release() self.apiHead.parent.setHandler(iApi.CATCHALL, headCatchallHandler) api = self.apiNeck.parent.method_b(*args, **kwargs) api.solicited = False assert api() == None assert caught.acquire(timeout=5) def testCallbackOnApiResponse(self): eResult = Result() self.apiHead.parent.eResult = eResult self.ipcHead.connect() self.ipcNeck.connect() self.mr.returnExact = True args = (0, 1, 2, 3) kwargs = {"four":4, "five":5}