Example #1
def _wrap(
    local_rank: int,
    fn: Callable,
    args: Dict[int, Tuple],
    envs: Dict[int, Dict[str, str]],
    stdout_redirects: Dict[int, str],  # redirect file for stdout (to console if None)
    stderr_redirects: Dict[int, str],  # redirect file for stderr (to console if None)
    ret_vals: Dict[int, mp.SimpleQueue],
    queue_finished_reading_event: synchronize.Event,
) -> None:
    # get the per-rank params up front so we fail fast if no mapping is found
    args_ = args[local_rank]
    env_ = envs[local_rank]
    ret_val_ = ret_vals[local_rank]

    stdout_rd = stdout_redirects[local_rank]
    stderr_rd = stderr_redirects[local_rank]

    stdout_cm = redirect_stdout(stdout_rd) if stdout_rd else _nullcontext()
    stderr_cm = redirect_stderr(stderr_rd) if stderr_rd else _nullcontext()

    for k, v in env_.items():
        os.environ[k] = v

    with stdout_cm, stderr_cm:
        ret = record(fn)(*args_)
    ret_val_.put(ret)
    queue_finished_reading_event.wait()
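Judging by its helpers (record, redirect_stdout, _nullcontext), _wrap lives in torch.distributed.elastic.multiprocessing; a minimal driver sketch, with demo_fn and the two-rank setup as assumptions:

import multiprocessing as mp

def demo_fn(x, y):
    return x + y

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    nprocs = 2
    ret_vals = {rank: ctx.SimpleQueue() for rank in range(nprocs)}
    done = ctx.Event()
    procs = []
    for rank in range(nprocs):
        p = ctx.Process(
            target=_wrap,
            args=(rank, demo_fn,
                  {rank: (rank, 10)},                 # per-rank positional args
                  {rank: {"LOCAL_RANK": str(rank)}},  # per-rank environment
                  {rank: None}, {rank: None},         # no stdio redirection
                  ret_vals, done))
        p.start()
        procs.append(p)
    results = {rank: q.get() for rank, q in ret_vals.items()}
    done.set()  # children block on queue_finished_reading_event until now
    for p in procs:
        p.join()
    print(results)  # {0: 10, 1: 11}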
Example #2
class Generator():
    
    def __init__(self, configurationPath, outputQueue):
        self.processorQueue = outputQueue
        self.logger = logging.getLogger("generator")
        self.lirchandle = pylirc.init("pylirc", configurationPath, False)
        self.stopFlag = Event()

    def loop(self):
        self.logger.info("Starting up.")
        if self.lirchandle:
            inputLirc = [self.lirchandle]
            timeout = 2
            self.logger.info("Successfully opened lirc, handle is " + str(self.lirchandle))
            self.logger.info("Started.")
            while not self.stopFlag.is_set():
                inputready, outputready, exceptready = select.select(inputLirc, [], [], timeout)
                s = pylirc.nextcode(1)
                if s:
                    for code in s:
                        repeat = code["repeat"]
                        currentCommand = code["config"]
                        self.logger.info("New event received: id = %s, repeat = %s" % (currentCommand, repeat))
                        try:
                            self.processorQueue.put_nowait(GeneratorEvent(currentCommand, repeat))
                        except Queue.Full:
                            self.logger.error("Processor queue is overloaded.")
            self.logger.info("Shut down.")

    def shutdown(self):
        self.logger.debug("Shutting down.")        
        self.stopFlag.set()
        pylirc.exit()        
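A usage sketch, assuming a LIRC setup and a consumer of GeneratorEvent items; note that `except Queue.Full` above dates the original to Python 2 (queue.Full in Python 3):

import logging
import queue
import threading

logging.basicConfig(level=logging.INFO)
events = queue.Queue(maxsize=64)
gen = Generator("/etc/lirc/lircrc", events)  # path is a placeholder
t = threading.Thread(target=gen.loop)
t.start()
# ... consume GeneratorEvent items from `events` elsewhere ...
gen.shutdown()
t.join()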
Example #3
def __init__(self, future):
    self.error = None
    self.finished_event = Event()
    self.future = future
    self.future.add_callbacks(
        callback=self.handle_page,
        errback=self.handle_error)
    self.res = set()
Example #4
def _process_inner(function: Callable[..., Any], log_level: str,
                   log_format: str, start_event: synchronize.Event,
                   stop_event: synchronize.Event, **kwargs: Any) -> None:
    basic_config(level=log_level, log_format=log_format)
    start_event.set()
    try:
        function(**kwargs)
    finally:
        stop_event.set()
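A launcher sketch for _process_inner; basic_config is whatever logging helper the surrounding project provides, and worker() is a stand-in:

import multiprocessing as mp

def worker(n: int) -> None:
    print(f"working on {n}")

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    start_event, stop_event = ctx.Event(), ctx.Event()
    proc = ctx.Process(
        target=_process_inner,
        kwargs=dict(function=worker, log_level="info", log_format="color",
                    start_event=start_event, stop_event=stop_event,
                    n=42))  # extra kwargs are forwarded to function(**kwargs)
    proc.start()
    start_event.wait()  # set once logging is configured and work begins
    stop_event.wait()   # set in the finally block when the function returns
    proc.join()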
Example #5
def watch_for_smtps(ready_flag: MP_Event, retq: MP.Queue):
    has_smtps = False
    ready_flag.set()
    start = time.monotonic()
    delay = AUTOSTOP_DELAY * 1.5
    while (time.monotonic() - start) <= delay:
        try:
            with SMTP_SSL("localhost", 8025, timeout=0.1) as client:
                client.ehlo("exemple.org")
                has_smtps = True
                break
        except Exception:
            time.sleep(0.05)
    retq.put(has_smtps)
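This looks like an aiosmtpd test probe; a sketch of how such a probe is driven, with AUTOSTOP_DELAY and the server under test assumed:

import multiprocessing as MP

if __name__ == "__main__":
    ready = MP.Event()
    retq = MP.Queue()
    proc = MP.Process(target=watch_for_smtps, args=(ready, retq))
    proc.start()
    ready.wait()  # the polling loop is now running
    # ... start the SMTPS server under test here ...
    print("has_smtps =", retq.get())
    proc.join()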
Example #6
class ASensor(Thread):
    '''
    The ASensor class represents a generic sensor; it implements the
    attributes and the initial configuration common to every sensor.
    Each sensor is a Thread so that its flow of control is independent.
    '''


    def __init__(self, name, typeSensor, pin):
        '''
        The constructor accepts as input:
        a name to identify the sensor, the sensor type,
        and the PIN it is connected to on the Raspberry.

        A Subject object is created to represent the stream of values coming
        out of the sensor, needed to implement the reactive paradigm.
        '''
        Thread.__init__(self, name=name)
        self.name = name
        self.typeSensor = typeSensor
        self.pin = pin
        self.flag = Event()
        self.stream = Subject()
        
    '''
    Methods that will be implemented by the individual sensors
    '''
    
    def getValue(self):
        pass
    
    def getName(self):
        return self.name
    
    def getType(self):
        return self.typeSensor
    
    def getPin(self):
        return self.pin
    
    def getOutputStream(self):
        return self.stream
        
    '''
    The initialize function configures the PIN so that it can be used.
    This procedure is common to all sensors (using the RPi.GPIO library).
    '''
     
    def stop(self):
        self.flag.set()
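A concrete-sensor sketch, assuming Subject comes from RxPY as the docstring's reactive mention suggests; the reading is a placeholder:

class TemperatureSensor(ASensor):
    def getValue(self):
        return 21.0  # placeholder reading; a real sensor would poll the PIN

    def run(self):
        # push readings onto the reactive stream until stop() sets the flag
        while not self.flag.is_set():
            self.stream.on_next(self.getValue())

sensor = TemperatureSensor("t0", "temperature", 4)
sensor.getOutputStream().subscribe(print)
sensor.start()
# ... later ...
sensor.stop()
sensor.join()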
Example #7
class Async_Optimize(object):
    def __init__(self):
        self.callback = lambda *x: None
        self.runsignal = Event()
        self.SENTINEL = "SENTINEL"

    def async_callback_collect(self, q):
        while self.runsignal.is_set():
            try:
                for ret in iter(lambda: q.get(timeout=1), self.SENTINEL):
                    self.callback(*ret)
                self.runsignal.clear()
            except Empty:
                pass

    def opt_async(self, f, df, x0, callback, update_rule=PolakRibiere,
                   messages=0, maxiter=5e3, max_f_eval=15e3, gtol=1e-6,
                   report_every=10, *args, **kwargs):
        self.runsignal.set()
        c = None
        outqueue = None
        if callback:
            outqueue = Queue()
            self.callback = callback
            c = Thread(target=self.async_callback_collect, args=(outqueue,))
            c.start()
        p = _CGDAsync(f, df, x0, update_rule, self.runsignal, self.SENTINEL,
                 report_every=report_every, messages=messages, maxiter=maxiter,
                 max_f_eval=max_f_eval, gtol=gtol, outqueue=outqueue, *args, **kwargs)
        p.start()
        return p, c

    def opt(self, f, df, x0, callback=None, update_rule=FletcherReeves,
                   messages=0, maxiter=5e3, max_f_eval=15e3, gtol=1e-6,
                   report_every=10, *args, **kwargs):
        p, c = self.opt_async(f, df, x0, callback, update_rule, messages,
                            maxiter, max_f_eval, gtol,
                            report_every, *args, **kwargs)
        while self.runsignal.is_set():
            try:
                p.join(1)
                if c: c.join(1)
            except KeyboardInterrupt:
                # print "^C"
                self.runsignal.clear()
                p.join()
                if c: c.join()
        if c and c.is_alive():
#             self.runsignal.set()
#             while self.runsignal.is_set():
#                 try:
#                     c.join(.1)
#                 except KeyboardInterrupt:
#                     # print "^C"
#                     self.runsignal.clear()
#                     c.join()
            print("WARNING: callback still running, optimisation done!")
        return p.result
Example #8
async def check_multiprocess_shutdown_event(
    shutdown_event: EventType, sleep: Callable[[float], Awaitable[Any]]
) -> None:
    while True:
        if shutdown_event.is_set():
            return
        await sleep(0.1)
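A usage sketch: the coroutine polls a cross-process event without blocking the loop, so it can be awaited alongside a server task; asyncio.sleep satisfies the sleep parameter:

import asyncio
import multiprocessing

async def main() -> None:
    shutdown_event = multiprocessing.Event()
    # Stand-in for another process setting the event:
    asyncio.get_running_loop().call_later(0.5, shutdown_event.set)
    await check_multiprocess_shutdown_event(shutdown_event, asyncio.sleep)
    print("shutdown observed")

asyncio.run(main())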
Example #9
def execute_worker(
    task_queue: Queue,
    continue_execution_flag: Value,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define workers work loop."""
    while True:
        if not continue_execution_flag.value:
            i_am_done_event.set()
            worker_wait_for_exit_event.wait()
        try:
            _ = task_queue.get(block=False)
            sleep(0.001)
        except Empty:
            continue
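A sketch of the pause handshake this loop implements, with the coordinator side assumed: clear the flag, wait for the worker's done event, reconfigure, then release:

from multiprocessing import Event, Process, Queue, Value

if __name__ == "__main__":
    task_queue = Queue()
    flag = Value("b", True)
    done, release = Event(), Event()
    worker = Process(target=execute_worker,
                     args=(task_queue, flag, done, release))
    worker.start()

    flag.value = False  # ask the worker to pause
    done.wait()         # worker is parked on worker_wait_for_exit_event
    # ... drain or swap task_queue here ...
    flag.value = True
    release.set()       # resume the work loop
    worker.terminate()  # real teardown; the loop itself never returns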
Example #10
async def check_shutdown(
    shutdown_event: EventType, sleep: Callable[[float], Awaitable[Any]]
) -> None:
    while True:
        if shutdown_event.is_set():
            raise Shutdown()
        await sleep(0.1)
Example #11
class Solver(object):
    UNSOLVED = -1
    SOLVED = 0
    SOLVING = 1
    UNSOLVABLE = 2
    r"""
    @attention: Always reference the model thus: solver.model.xxx
    """
    def __init__(self, model, count=1):
        self._lock = RLock()
        self._model = model
        #    Context:
        self._abort = Event()
        self._abort.clear()
        self._solvers = Context(count, self._getModel, self._abort, self._lock)
        self._isAborted = False
    def _getModel(self):
        with self.lock():
            return self._model
    model = property(_getModel)
    def __str__(self):
        pre="Unsolved"
        with self.lock():
            if len(list(self._model.unknowns()))==0:
                pre="> SOLVED < "
            return "\n".join(["%(PRE)sSolver:"%{"PRE":pre}, str(self._model)])
    def lock(self):
        return self._lock
    def serialize(self):
        #    Retrieve the model ready for storing:
        with self.lock():
            return self._model.serialize()
    def deserialize(self, data):
        #    Load the model ready for use:
        with self.lock():
            self._model = self.model.deserialize(data)
    def clone(self):
        with self.lock():
            try:
                self.abort()
            except Aborted as _e:
                pass
            self._abort.set()
            m = self.model.clone()
            self._isAborted = False
            return Solver(m)
Example #12
def __init__(self, model, count=1):
    self._lock = RLock()
    self._model = model
    #    Context:
    self._abort = Event()
    self._abort.clear()
    self._solvers = Context(count, self._getModel, self._abort, self._lock)
    self._isAborted = False
Example #13
class PipelineSolver(object):
    def __init__(self, model, abort, lock):
        self.model = model
        self.abort = abort
        self.lock = lock
        self.event = Event()
        self.event.clear()
        self.args = []
        self.unsolvable = False
        self.solved = False
        model.addParentListener(self)
        self._strategies = StrategiesFactory(model, self._checkAbort, lambda: self.solved, lambda: self.unsolvable)
        self._resetPipeline()
    def notification(self, who, event=None, args=None):
        if who==grid.NAME:
            if event in [iModelNotifications.ALL(), iModelNotifications.SOLVED(), iModelNotifications.UNSOLVABLE()]:
                self.args = []
            if event==iModelNotifications.SOLVED():
                self.solved = True
            if event==iModelNotifications.UNSOLVABLE():
                self.unsolvable = True
        if event==iModelNotifications.CUSTOM():
            return
        self.args.append((event, args))
    def __call__(self):
        try:
            if self.unsolvable:
                raise UnSolveable(self.model)
            if self.solved or self.model.solved:
                raise Solved(self.model)
            eventCount = self._eventCount()
            if len(self.pipeline)>0 or eventCount>0:
                self._executePipeline(eventCount)
            else:
                print "out of rules!"
                raise UnSolveable(self.model)
        except (Solved, UnSolveable, AbortAck), e:
            if isinstance(e, Solved):
                self.solved = True
                self.model.solved = True
            elif isinstance(e, UnSolveable):
                self.unsolvable = True
                self.model.unsolvable = True
            self.args = []
            self.pipeline = []
            raise
Example #14
def spin(msg: str, done: synchronize.Event) -> None:
    for char in itertools.cycle("|/-\\"):
        status = f"\r{char} {msg}"
        print(status, end="", flush=True)
        if done.wait(0.1):
            break
    blanks = " " * len(status)
    print(f"\r{blanks}\r", end="")
Example #15
def spin(msg: str, done: synchronize.Event) -> None:  # <1>
    for char in itertools.cycle(r'\|/-'):  # <2>
        status = f'\r{char} {msg}'  # <3>
        print(status, end='', flush=True)
        if done.wait(.1):  # <4>
            break  # <5>
    blanks = ' ' * len(status)
    print(f'\r{blanks}\r', end='')  # <6>
Example #16
def spin(msg: str, done: synchronize.Event) -> None:
    for char in itertools.cycle(r'\|/-'):
        status = f'\r{char} {msg}'
        print(status, end='', flush=True)
        if done.wait(.1):
            break
    blanks = ' ' * len(status)
    print(f'\r{blanks}\r', end='')
Example #17
def enqueue_worker(
    workload_publisher_url: str,
    task_queue: Queue,
    continue_execution_flag: Value,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Fill the queue."""
    context = Context()
    sub_socket = context.socket(SUB)
    sub_socket.connect(workload_publisher_url)
    sub_socket.setsockopt_string(SUBSCRIBE, "")

    while True:
        published_data: Dict = sub_socket.recv_json()
        if not continue_execution_flag.value:
            worker_wait_for_exit_event.wait()
        else:
            handle_published_data(published_data, task_queue)
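The other end of that SUB socket would be a PUB publisher; a zmq sketch, with the URL and payload shape as assumptions (handle_published_data is not shown):

import time
from zmq import PUB, Context

context = Context()
pub_socket = context.socket(PUB)
pub_socket.bind("tcp://*:5555")      # workload_publisher_url counterpart
time.sleep(0.5)                      # give subscribers time to connect
pub_socket.send_json({"tasks": []})  # placeholder payload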
Example #18
def _handle_lock(event_in: synchronize.Event, event_out: synchronize.Event,
                 path: str) -> None:
    """
    Acquire a file lock on given path, then wait to release it. This worker is coordinated
    using events to signal when the lock should be acquired and released.
    :param multiprocessing.Event event_in: event object to signal when to release the lock
    :param multiprocessing.Event event_out: event object to signal when the lock is acquired
    :param path: the path to lock
    """
    if os.path.isdir(path):
        my_lock = lock.lock_dir(path)
    else:
        my_lock = lock.LockFile(path)
    try:
        event_out.set()
        assert event_in.wait(
            timeout=20), 'Timeout while waiting to release the lock.'
    finally:
        my_lock.release()
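A sketch of the test-side choreography for _handle_lock, with the path as a placeholder: wait for event_out before poking the lock, then set event_in to release it:

import multiprocessing

if __name__ == "__main__":
    event_in = multiprocessing.Event()   # "release the lock now"
    event_out = multiprocessing.Event()  # "the lock is held"
    proc = multiprocessing.Process(
        target=_handle_lock, args=(event_in, event_out, "/tmp/locked-path"))
    proc.start()
    assert event_out.wait(timeout=20), "worker never acquired the lock"
    # ... verify that acquiring the same lock here fails ...
    event_in.set()  # tell the worker to release
    proc.join()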
Example #19
def watch_for_tls(ready_flag: MP_Event, retq: MP.Queue):
    has_tls = False
    req_tls = False
    ready_flag.set()
    start = time.monotonic()
    delay = AUTOSTOP_DELAY * 4
    while (time.monotonic() - start) <= delay:
        try:
            with SMTPClient("localhost", 8025, timeout=0.1) as client:
                resp = client.docmd("HELP", "HELO")
                if resp == S.S530_STARTTLS_FIRST:
                    req_tls = True
                client.ehlo("exemple.org")
                if "starttls" in client.esmtp_features:
                    has_tls = True
                break
        except Exception:
            time.sleep(0.05)
    retq.put(has_tls)
    retq.put(req_tls)
Example #20
def execute_queries(
    worker_id: str,
    task_queue: Queue,
    connection_pool: pool,
    continue_execution_flag: Value,
    database_id: str,
    i_am_done_event: EventType,
    worker_wait_for_exit_event: EventType,
) -> None:
    """Define workers work loop."""
    with PoolCursor(connection_pool) as cur:
        with StorageCursor(STORAGE_HOST, STORAGE_PORT, STORAGE_USER,
                           STORAGE_PASSWORD, database_id) as log:
            successful_queries: List[Tuple[int, int, str, str, str]] = []
            failed_queries: List[Tuple[int, str, str, str]] = []
            last_batched = time_ns()
            while True:
                if not continue_execution_flag.value:
                    i_am_done_event.set()
                    worker_wait_for_exit_event.wait()

                try:
                    task: Tuple[str, Tuple[Tuple[Union[str, int],
                                                 Optional[str]], ...], str,
                                str] = task_queue.get(block=False)
                    query, not_formatted_parameters, workload_type, query_type = task
                    query = query.replace("[STREAM_ID]", str(worker_id))
                    formatted_parameters = get_formatted_parameters(
                        not_formatted_parameters)
                    endts, latency = execute_task(cur, query,
                                                  formatted_parameters)
                    successful_queries.append(
                        (endts, latency, workload_type, query_type, worker_id))
                except Empty:
                    continue
                except (ValueError, Error) as e:
                    failed_queries.append(
                        (time_ns(), worker_id, str(task), str(e)))

                last_batched = log_results(log, last_batched,
                                           successful_queries, failed_queries)
Example #21
def __init__(self, model, abort, lock):
    self.model = model
    self.abort = abort
    self.lock = lock
    self.event = Event()
    self.event.clear()
    self.args = []
    self.unsolvable = False
    self.solved = False
    model.addParentListener(self)
    self._strategies = StrategiesFactory(model, self._checkAbort, lambda: self.solved, lambda: self.unsolvable)
    self._resetPipeline()
Example #22
    async def wslogs(self,
                     remote_id: 'str',
                     stop_event: Event,
                     current_line: int = 0):
        """ Websocket log stream from a remote pea/pod

        :param remote_id: the identity of that pea/pod
        :param stop_event: the multiprocessing event that signals when to stop streaming
        :param current_line: the line number from which logs will be streamed
        :return:
        """
        with ImportExtensions(required=True):
            import websockets

        try:
            # sleeping for a few seconds to allow the logs to be written on the remote
            await asyncio.sleep(3)
            async with websockets.connect(
                    f'{self.log_url}/{remote_id}?timeout=20') as websocket:
                await websocket.send(json.dumps({'from': current_line}))
                remote_loggers = {}
                while True:
                    log_line = await websocket.recv()
                    if log_line:
                        try:
                            log_line = json.loads(log_line)
                            current_line = int(list(log_line.keys())[0])
                            log_line_dict = list(log_line.values())[0]
                            log_line_dict = json.loads(
                                log_line_dict.split('\t')[-1].strip())
                            name = log_line_dict['name']
                            if name not in remote_loggers:
                                remote_loggers[name] = JinaLogger(
                                    context=f'🌏 {name}')
                            # TODO: change logging level, process name in local logger
                            remote_loggers[name].info(
                                f'{log_line_dict["message"].strip()}')
                        except json.decoder.JSONDecodeError:
                            continue
                    await websocket.send(json.dumps({}))
                    if stop_event.is_set():
                        for logger in remote_loggers.values():
                            logger.close()
                        raise RemotePodClosed
        except websockets.exceptions.ConnectionClosedOK:
            self.logger.debug(f'Client got disconnected from server')
            return current_line
        except websockets.exceptions.WebSocketException as e:
            self.logger.error(
                f'Got following error while streaming logs via websocket {repr(e)}'
            )
            return 0
Example #23
def __init__(self, name, typeSensor, pin):
    '''
    The constructor accepts as input:
    a name to identify the sensor, the sensor type,
    and the PIN it is connected to on the Raspberry.

    A Subject object is created to represent the stream of values coming
    out of the sensor, needed to implement the reactive paradigm.
    '''
    Thread.__init__(self, name=name)
    self.name = name
    self.typeSensor = typeSensor
    self.pin = pin
    self.flag = Event()
    self.stream = Subject()
Example #24
class PagedResultHandler(object):

    def __init__(self, future):
        self.error = None
        self.finished_event = Event()
        self.future = future
        self.future.add_callbacks(
            callback=self.handle_page,
            errback=self.handle_error)
        self.res = set()

    def handle_page(self, results):
        
        for row in results:            
            self.res.add(row[0])

        if self.future.has_more_pages:
            self.future.start_fetching_next_page()
        else:
            self.finished_event.set()

    def handle_error(self, exc):
        self.error = exc
        self.finished_event.set()
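Driving code in the style of the DataStax driver's paging docs, which this handler follows; the keyspace and query are placeholders:

from cassandra.cluster import Cluster

cluster = Cluster()
session = cluster.connect("mykeyspace")
future = session.execute_async("SELECT id FROM users")
handler = PagedResultHandler(future)
handler.finished_event.wait()  # blocks until the last page is consumed
if handler.error:
    raise handler.error
print(sorted(handler.res))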
Example #25
def _worker(queue: JoinableQueue, engine: Engine,
            discovery: Dict[str, DiscoveryState], objects: List[Artifact],
            lock: LockT, stopswitch: EventT):
    while not stopswitch.is_set():
        try:
            artifact = queue.get(block=True, timeout=0.5)
        except QueueEmpty:
            continue

        try:
            new_artifacts = engine.process(artifact, discovery, objects, lock)

            for new_artifact in new_artifacts:
                queue.put(new_artifact)
        finally:
            queue.task_done()
Example #26
    def log(self, remote_id: 'str', stop_event: Event, **kwargs) -> None:
        """ Start the log stream from the remote pea/pod; output goes to the local logger

        :param remote_id: the identity of that pea/pod
        :param stop_event: the multiprocessing event that signals when to stop streaming
        :return:
        """

        with ImportExtensions(required=True):
            import requests

        try:
            url = f'{self.log_url}/?{self.kind}_id={remote_id}'
            r = requests.get(url=url, stream=True)
            for log_line in r.iter_content():
                if log_line:
                    self.logger.info(f'🌏 {log_line.strip()}')
                if stop_event.is_set():
                    break
        except requests.exceptions.RequestException as ex:
            self.logger.error(
                f'couldn\'t connect with remote jinad url {repr(ex)}')
        finally:
            self.logger.info(f'🌏 exiting from remote logger')
Example #27
def Event():
    '''
    Returns an event object
    '''
    from multiprocessing.synchronize import Event
    return Event()
Example #28
def __init__(self, configurationPath, outputQueue):
    self.processorQueue = outputQueue
    self.logger = logging.getLogger("generator")
    self.lirchandle = pylirc.init("pylirc", configurationPath, False)
    self.stopFlag = Event()
Example #29
class Async_Optimize(object):
    callback = lambda *x: None
    runsignal = Event()
    SENTINEL = "SENTINEL"

    def async_callback_collect(self, q):
        while self.runsignal.is_set():
            try:
                for ret in iter(lambda: q.get(timeout=1), self.SENTINEL):
                    self.callback(*ret)
                self.runsignal.clear()
            except Empty:
                pass

    def opt_async(self,
                  f,
                  df,
                  x0,
                  callback,
                  update_rule=PolakRibiere,
                  messages=0,
                  maxiter=5e3,
                  max_f_eval=15e3,
                  gtol=1e-6,
                  report_every=10,
                  *args,
                  **kwargs):
        self.runsignal.set()
        c = None
        outqueue = None
        if callback:
            outqueue = Queue()
            self.callback = callback
            c = Thread(target=self.async_callback_collect, args=(outqueue, ))
            c.start()
        p = _CGDAsync(f,
                      df,
                      x0,
                      update_rule,
                      self.runsignal,
                      self.SENTINEL,
                      report_every=report_every,
                      messages=messages,
                      maxiter=maxiter,
                      max_f_eval=max_f_eval,
                      gtol=gtol,
                      outqueue=outqueue,
                      *args,
                      **kwargs)
        p.start()
        return p, c

    def opt(self,
            f,
            df,
            x0,
            callback=None,
            update_rule=FletcherReeves,
            messages=0,
            maxiter=5e3,
            max_f_eval=15e3,
            gtol=1e-6,
            report_every=10,
            *args,
            **kwargs):
        p, c = self.opt_async(f, df, x0, callback, update_rule, messages,
                              maxiter, max_f_eval, gtol, report_every, *args,
                              **kwargs)
        while self.runsignal.is_set():
            try:
                p.join(1)
                if c: c.join(1)
            except KeyboardInterrupt:
                # print "^C"
                self.runsignal.clear()
                p.join()
                if c: c.join()
        if c and c.is_alive():
            #             self.runsignal.set()
            #             while self.runsignal.is_set():
            #                 try:
            #                     c.join(.1)
            #                 except KeyboardInterrupt:
            #                     # print "^C"
            #                     self.runsignal.clear()
            #                     c.join()
            print "WARNING: callback still running, optimisation done!"
        return p.result
Example #30
def __init__(self):
    self.callback = lambda *x: None
    self.runsignal = Event()
    self.SENTINEL = "SENTINEL"
Example #31
def __init__(self):
    self.callback = lambda *x: None
    self.runsignal = Event()
    self.SENTINEL = "SENTINEL"
Example #32
def Event():
    """
    Returns an event object
    """
    from multiprocessing.synchronize import Event
    return Event()
Example #33
def Event():
    from multiprocessing.synchronize import Event
    return Event()
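A caveat on these last factories: in current CPython, multiprocessing.synchronize.Event takes a required keyword-only ctx argument, so the zero-argument call above only works on older versions. The supported spelling today is the top-level factory, which binds the default context for you:

import multiprocessing

def make_event():
    # multiprocessing.Event() returns a multiprocessing.synchronize.Event
    # bound to the default start-method context.
    return multiprocessing.Event()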