import logging
import queue
import select
from threading import Event

import pylirc  # LIRC bindings; GeneratorEvent is defined elsewhere in this module


class Generator:
    def __init__(self, configurationPath, outputQueue):
        self.processorQueue = outputQueue
        self.logger = logging.getLogger("generator")
        self.lirchandle = pylirc.init("pylirc", configurationPath, False)
        self.stopFlag = Event()

    def loop(self):
        self.logger.info("Starting up.")
        if self.lirchandle:
            inputLirc = [self.lirchandle]
            timeout = 2
            self.logger.info("Successfully opened lirc, handle is " + str(self.lirchandle))
            self.logger.info("Started.")
            while not self.stopFlag.is_set():
                # Wait up to `timeout` seconds for the lirc socket to become readable.
                inputready, outputready, exceptready = select.select(inputLirc, [], [], timeout)
                s = pylirc.nextcode(1)
                if s:
                    for code in s:
                        repeat = code["repeat"]
                        currentCommand = code["config"]
                        self.logger.info("New event received: id = %s, repeat = %s" % (currentCommand, repeat))
                        try:
                            self.processorQueue.put_nowait(GeneratorEvent(currentCommand, repeat))
                        except queue.Full:
                            self.logger.error("Processor queue is overloaded.")
            self.logger.info("Shut down.")

    def shutdown(self):
        self.logger.debug("Shutting down.")
        self.stopFlag.set()
        pylirc.exit()
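# Usage sketch (hedged): wiring Generator to a consumer thread. The lircrc
# path and the consumer loop below are illustrative assumptions, not part of
# the module above.
import queue
import threading

events = queue.Queue(maxsize=64)
generator = Generator("/etc/lirc/lircrc", events)  # hypothetical config path
threading.Thread(target=generator.loop, daemon=True).start()

try:
    while True:
        event = events.get(timeout=5)  # GeneratorEvent instances
        print(event)
except queue.Empty:
    generator.shutdown()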
from queue import Empty, Queue
from threading import Event, Thread

# PolakRibiere, FletcherReeves and the _CGDAsync worker are defined elsewhere
# in this module.


class Async_Optimize(object):
    def __init__(self):
        self.callback = lambda *x: None
        self.runsignal = Event()
        self.SENTINEL = "SENTINEL"

    def async_callback_collect(self, q):
        # Drain the queue until the sentinel arrives, then clear the run signal.
        while self.runsignal.is_set():
            try:
                for ret in iter(lambda: q.get(timeout=1), self.SENTINEL):
                    self.callback(*ret)
                self.runsignal.clear()
            except Empty:
                pass

    def opt_async(self, f, df, x0, callback, update_rule=PolakRibiere,
                  messages=0, maxiter=5e3, max_f_eval=15e3, gtol=1e-6,
                  report_every=10, *args, **kwargs):
        self.runsignal.set()
        c = None
        outqueue = None
        if callback:
            # Collect intermediate results on a background thread.
            outqueue = Queue()
            self.callback = callback
            c = Thread(target=self.async_callback_collect, args=(outqueue,))
            c.start()
        p = _CGDAsync(f, df, x0, update_rule, self.runsignal, self.SENTINEL,
                      report_every=report_every, messages=messages,
                      maxiter=maxiter, max_f_eval=max_f_eval, gtol=gtol,
                      outqueue=outqueue, *args, **kwargs)
        p.start()
        return p, c

    def opt(self, f, df, x0, callback=None, update_rule=FletcherReeves,
            messages=0, maxiter=5e3, max_f_eval=15e3, gtol=1e-6,
            report_every=10, *args, **kwargs):
        p, c = self.opt_async(f, df, x0, callback, update_rule, messages,
                              maxiter, max_f_eval, gtol, report_every,
                              *args, **kwargs)
        while self.runsignal.is_set():
            try:
                p.join(1)
                if c:
                    c.join(1)
            except KeyboardInterrupt:
                self.runsignal.clear()
                p.join()
                if c:
                    c.join()
        if c and c.is_alive():
            print("WARNING: callback still running, optimisation done!")
        return p.result
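# Usage sketch (hedged): minimising a quadratic with the asynchronous
# optimiser. f, df, x0 and the progress callback are illustrative; the
# callback's signature depends on what _CGDAsync puts on the queue, so the
# one below is a hypothetical stand-in.
import numpy as np

def f(x):
    return float(np.dot(x, x))

def df(x):
    return 2 * x

def progress(*report):  # hypothetical callback payload
    print(report)

result = Async_Optimize().opt(f, df, x0=np.ones(3), callback=progress)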
from multiprocessing import synchronize
from typing import Any, Callable

# basic_config is the logging helper imported elsewhere in this module.


def _process_inner(function: Callable[..., Any],
                   log_level: str,
                   log_format: str,
                   start_event: synchronize.Event,
                   stop_event: synchronize.Event,
                   **kwargs: Any) -> None:
    # Configure logging in the child process, signal readiness, run the
    # payload, and always signal completion via the stop event.
    basic_config(level=log_level, log_format=log_format)
    start_event.set()
    try:
        function(**kwargs)
    finally:
        stop_event.set()
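# Usage sketch (hedged): running a payload in a child process and waiting on
# the coordination events. The payload, log format name and event wiring
# below are illustrative.
import multiprocessing

def payload(n: int) -> None:
    print("working on", n)

if __name__ == "__main__":
    start, stop = multiprocessing.Event(), multiprocessing.Event()
    proc = multiprocessing.Process(
        target=_process_inner,
        args=(payload, "info", "color", start, stop),
        kwargs={"n": 42},
    )
    proc.start()
    start.wait(5)   # child configured logging and began work
    stop.wait(5)    # payload finished (or raised)
    proc.join()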
import time
from smtplib import SMTP_SSL

# MP_Event / MP.Queue are multiprocessing aliases and AUTOSTOP_DELAY is a
# constant, all defined elsewhere in the test module.


def watch_for_smtps(ready_flag: MP_Event, retq: MP.Queue):
    has_smtps = False
    ready_flag.set()
    start = time.monotonic()
    delay = AUTOSTOP_DELAY * 1.5
    while (time.monotonic() - start) <= delay:
        try:
            # Probe until the server starts answering over implicit TLS.
            with SMTP_SSL("localhost", 8025, timeout=0.1) as client:
                client.ehlo("exemple.org")
                has_smtps = True
                break
        except Exception:
            time.sleep(0.05)
    retq.put(has_smtps)
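# Usage sketch (hedged): running the probe in a separate process while the
# SMTPS server under test starts up; the wiring below is illustrative.
import multiprocessing as MP

if __name__ == "__main__":
    ready = MP.Event()
    results = MP.Queue()
    watcher = MP.Process(target=watch_for_smtps, args=(ready, results))
    watcher.start()
    ready.wait(5)                 # probe loop is running
    # ... start the SMTPS server here ...
    print("SMTPS reachable:", results.get(timeout=10))
    watcher.join()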
from threading import Event, Thread

from rx.subject import Subject  # RxPY; assumed source of Subject


class ASensor(Thread):
    '''
    The ASensor class represents a generic sensor; it implements the
    attributes and the initial configuration common to every sensor.
    Each sensor is a Thread so that its flow of control is independent.
    '''

    def __init__(self, name, typeSensor, pin):
        '''
        The constructor takes as input: a name identifying the sensor, the
        sensor type, and the PIN it is connected to on the Raspberry Pi.
        A Subject object is created to represent the stream of values coming
        out of the sensor; it is needed to implement the reactive paradigm.
        '''
        Thread.__init__(self, name=name)
        self.name = name
        self.typeSensor = typeSensor
        self.pin = pin
        self.flag = Event()
        self.stream = Subject()

    '''
    Methods that the individual sensors will implement.
    '''
    def getValue(self):
        pass

    def getName(self):
        return self.name

    def getType(self):
        return self.typeSensor

    def getPin(self):
        return self.pin

    def getOutputStream(self):
        return self.stream

    '''
    The initialize function configures the PIN so it can be used. This
    procedure is common to all sensors (using the RPi.GPIO library).
    '''
    def stop(self):
        self.flag.set()
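# Usage sketch (hedged): a minimal concrete sensor. The random reading below
# is an illustrative stand-in; real subclasses would read the pin via
# RPi.GPIO as the docstring notes.
import random
import time

class FakeTemperatureSensor(ASensor):
    def getValue(self):
        return random.uniform(18.0, 25.0)  # stand-in for a GPIO read

    def run(self):
        # Push readings onto the reactive stream until stop() is called.
        while not self.flag.is_set():
            self.stream.on_next(self.getValue())
            time.sleep(1)

sensor = FakeTemperatureSensor("t0", "temperature", pin=4)
sensor.getOutputStream().subscribe(print)
sensor.start()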
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as EventType
from queue import Empty
from time import sleep


def execute_worker(
    task_queue: Queue,
    continue_execution_flag: Value,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define the worker's work loop."""
    while True:
        if not continue_execution_flag.value:
            # Report that this worker is idle, then block until the
            # coordinator allows execution to resume.
            i_am_done_event.set()
            worker_wait_for_exit_event.wait()
        try:
            _ = task_queue.get(block=False)
            sleep(0.001)
        except Empty:
            continue
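# Usage sketch (hedged): pausing and resuming a worker with the shared flag
# and events; the coordinator wiring below is illustrative.
from multiprocessing import Event, Process, Queue, Value

if __name__ == "__main__":
    tasks = Queue()
    flag = Value("b", True)
    done, resume = Event(), Event()
    worker = Process(target=execute_worker, args=(tasks, flag, done, resume))
    worker.start()
    flag.value = False   # ask the worker to pause
    done.wait(5)         # worker acknowledged
    flag.value = True
    resume.set()         # let the worker continue
    worker.terminate()
    worker.join()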
from threading import Event, RLock

# Context and the Aborted exception are defined elsewhere in this module.


class Solver(object):
    UNSOLVED = -1
    SOLVED = 0
    SOLVING = 1
    UNSOLVABLE = 2

    r"""
    @attention: Always reference the model thus: solver.model.xxx
    """

    def __init__(self, model, count=1):
        self._lock = RLock()
        self._model = model
        # Context:
        self._abort = Event()
        self._abort.clear()
        self._solvers = Context(count, self._getModel, self._abort, self._lock)
        self._isAborted = False

    def _getModel(self):
        with self.lock():
            return self._model

    model = property(_getModel)

    def __str__(self):
        pre = "Unsolved"
        with self.lock():
            if len(list(self._model.unknowns())) == 0:
                pre = "> SOLVED < "
            return "\n".join(["%(PRE)sSolver:" % {"PRE": pre}, str(self._model)])

    def lock(self):
        return self._lock

    def serialize(self):
        # Retrieve the model ready for storing:
        with self.lock():
            return self._model.serialize()

    def deserialize(self, data):
        # Load the model ready for use:
        with self.lock():
            self._model = self.model.deserialize(data)

    def clone(self):
        with self.lock():
            try:
                self.abort()
            except Aborted:
                pass
            self._abort.set()
            m = self.model.clone()
            self._isAborted = False
            return Solver(m)
import os
from multiprocessing import synchronize

from certbot._internal import lock  # assumed source of the lock helpers


def _handle_lock(event_in: synchronize.Event, event_out: synchronize.Event,
                 path: str) -> None:
    """
    Acquire a file lock on the given path, then wait to release it.

    This worker is coordinated using events to signal when the lock
    should be acquired and released.

    :param multiprocessing.Event event_in: event object to signal when to
        release the lock
    :param multiprocessing.Event event_out: event object to signal when the
        lock is acquired
    :param str path: the path to lock
    """
    if os.path.isdir(path):
        my_lock = lock.lock_dir(path)
    else:
        my_lock = lock.LockFile(path)
    try:
        event_out.set()
        assert event_in.wait(
            timeout=20), 'Timeout while waiting to release the lock.'
    finally:
        my_lock.release()
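# Usage sketch (hedged): holding the lock in a child process while the parent
# verifies the path is locked; the path and wiring below are illustrative.
import multiprocessing

if __name__ == "__main__":
    release, acquired = multiprocessing.Event(), multiprocessing.Event()
    proc = multiprocessing.Process(
        target=_handle_lock, args=(release, acquired, "/tmp/locked-path"))
    proc.start()
    acquired.wait(10)   # child holds the lock now
    # ... assert that acquiring the same lock here fails ...
    release.set()       # tell the child to release and exit
    proc.join()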
import time
from smtplib import SMTP as SMTPClient  # assumed alias

# MP_Event / MP.Queue are multiprocessing aliases; AUTOSTOP_DELAY and the
# status-code constants in S are defined elsewhere in the test module.


def watch_for_tls(ready_flag: MP_Event, retq: MP.Queue):
    has_tls = False
    req_tls = False
    ready_flag.set()
    start = time.monotonic()
    delay = AUTOSTOP_DELAY * 4
    while (time.monotonic() - start) <= delay:
        try:
            with SMTPClient("localhost", 8025, timeout=0.1) as client:
                resp = client.docmd("HELP", "HELO")
                if resp == S.S530_STARTTLS_FIRST:
                    req_tls = True
                client.ehlo("exemple.org")
                if "starttls" in client.esmtp_features:
                    has_tls = True
                break
        except Exception:
            time.sleep(0.05)
    retq.put(has_tls)
    retq.put(req_tls)
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as EventType
from queue import Empty
from time import time_ns
from typing import List, Optional, Tuple, Union

# Assumed driver import; PoolCursor, StorageCursor, execute_task,
# get_formatted_parameters, log_results and the STORAGE_* constants are
# defined elsewhere in this module.
from psycopg2 import Error, pool


def execute_queries(
    worker_id: str,
    task_queue: Queue,
    connection_pool: pool,
    continue_execution_flag: Value,
    database_id: str,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define the worker's work loop."""
    with PoolCursor(connection_pool) as cur:
        with StorageCursor(STORAGE_HOST, STORAGE_PORT, STORAGE_USER,
                           STORAGE_PASSWORD, database_id) as log:
            successful_queries: List[Tuple[int, int, str, str, str]] = []
            failed_queries: List[Tuple[int, str, str, str]] = []
            last_batched = time_ns()
            while True:
                if not continue_execution_flag.value:
                    # Report idle, then block until the coordinator resumes us.
                    i_am_done_event.set()
                    worker_wait_for_exit_event.wait()
                try:
                    task: Tuple[str, Tuple[Tuple[Union[str, int], Optional[str]], ...], str, str] = task_queue.get(block=False)
                    query, not_formatted_parameters, workload_type, query_type = task
                    query = query.replace("[STREAM_ID]", str(worker_id))
                    formatted_parameters = get_formatted_parameters(not_formatted_parameters)
                    endts, latency = execute_task(cur, query, formatted_parameters)
                    successful_queries.append(
                        (endts, latency, workload_type, query_type, worker_id))
                except Empty:
                    continue
                except (ValueError, Error) as e:
                    failed_queries.append(
                        (time_ns(), worker_id, str(task), str(e)))
                last_batched = log_results(
                    log, last_batched, successful_queries, failed_queries)
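# Usage sketch (hedged): the shape of a task consumed by execute_queries,
# inferred from the annotation above. The SQL text, parameters and type
# labels are illustrative.
from multiprocessing import Queue

task_queue = Queue()
task = (
    "SELECT * FROM orders WHERE id = %s /* [STREAM_ID] */",  # query template
    ((42, None),),          # not-yet-formatted parameters
    "benchmark",            # workload_type
    "point-lookup",         # query_type
)
task_queue.put(task)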
from threading import Event


class PagedResultHandler(object):
    """Collect all pages of an asynchronous Cassandra query result."""

    def __init__(self, future):
        self.error = None
        self.finished_event = Event()
        self.future = future
        self.res = set()
        # Register callbacks last, so handle_page can never fire on the
        # driver thread before the handler is fully initialised.
        self.future.add_callbacks(
            callback=self.handle_page,
            errback=self.handle_error)

    def handle_page(self, results):
        for row in results:
            self.res.add(row[0])
        if self.future.has_more_pages:
            self.future.start_fetching_next_page()
        else:
            self.finished_event.set()

    def handle_error(self, exc):
        self.error = exc
        self.finished_event.set()
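# Usage sketch (hedged): driving the handler with the DataStax Cassandra
# driver; the contact point, keyspace and query are illustrative.
from cassandra.cluster import Cluster

cluster = Cluster(["127.0.0.1"])
session = cluster.connect("my_keyspace")  # hypothetical keyspace
future = session.execute_async("SELECT user_id FROM users")
handler = PagedResultHandler(future)
handler.finished_event.wait()
if handler.error:
    raise handler.error
print(len(handler.res), "distinct values collected")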