Example #1
    def device_receive(cls, data_connection: Connection, ctrl_connection: Connection, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=False, parameters=dev_parameters):
            return False

        try:
            cls.adapt_num_read_samples_to_sample_rate(dev_parameters[cls.Command.SET_SAMPLE_RATE.name])
        except NotImplementedError:
            # Many SDRs like HackRF or AirSpy do not need to calculate READ_SAMPLES
            # as default values are either fine or given by the hardware
            pass

        if cls.ASYNCHRONOUS:
            cls.enter_async_receive_mode(data_connection)
        else:
            cls.prepare_sync_receive(ctrl_connection)

        exit_requested = False

        while not exit_requested:
            if cls.ASYNCHRONOUS:
                time.sleep(0.5)
            else:
                cls.receive_sync(data_connection)
            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=False)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        cls.shutdown_device(ctrl_connection)
        data_connection.close()
        ctrl_connection.close()
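The worker above runs in its own process with two pipes: one carrying samples, one carrying commands. Below is a minimal, self-contained sketch of that wiring; the worker body is a stand-in for Device.device_receive, not urh's actual implementation.

from multiprocessing import Pipe, Process

def fake_receive(data_conn, ctrl_conn):
    # Stand-in worker: emit one payload, then poll for a STOP command.
    data_conn.send(b"samples")
    exit_requested = False
    while not exit_requested:
        while ctrl_conn.poll(0.1):
            if ctrl_conn.recv() == "STOP":
                exit_requested = True
                break
    ctrl_conn.close()
    data_conn.close()

if __name__ == "__main__":
    data_parent, data_child = Pipe()
    ctrl_parent, ctrl_child = Pipe()
    p = Process(target=fake_receive, args=(data_child, ctrl_child))
    p.start()
    print(data_parent.recv())  # b'samples'
    ctrl_parent.send("STOP")   # mirrors cls.Command.STOP.name
    p.join()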
Example #2
    def process_taxonomy_merge(id: int, database_url: str, stop_flag: Event,
                               queue: Queue, log_connection: Connection):
        """
        Processes taxonomy merges from a queue.

        Parameters
        ----------
        id : int
            Just an ID for identifying the process in the logs
        database_url : str
            Database URL, e.g. postgres://username:password@host:port/database
        stop_flag : Event
            Multiprocessing event to stop the processes
        queue : Queue
            Multiprocessing queue for queuing the merges
        log_connection : Connection
            Multiprocessing connection to send logs to the logger
        """
        log_connection.send(f"merge worker {id} is online")
        database_connection = None
        while not stop_flag.is_set() or not queue.empty():
            if not database_connection or (database_connection and
                                           database_connection.closed != 0):
                database_connection = psycopg2.connect(database_url)
            try:
                # Take a taxonomy from queue
                taxonomy_merges = queue.get(True, 2)
                commit_errors = 0
                # Try to insert it into the database
                while True:
                    try:
                        with database_connection:
                            with database_connection.cursor(
                            ) as database_cursor:
                                TaxonomyMerge.bulk_insert(
                                    database_cursor, taxonomy_merges)
                        break
                    except BaseException as error:
                        commit_errors += 1
                        # If there are tries left, sleep between 2 and 5 seconds and try again
                        if commit_errors < 3:
                            time.sleep(random.randint(2, 5))
                        else:
                            # Otherwise log the error and proceed with next taxonomy
                            comma_separated_taxonomy_merges = ", ".join([
                                f"({taxonomy_merge.source_id},{taxonomy_merge.target_id})"
                                for taxonomy_merge in taxonomy_merges
                            ])
                            log_connection.send(
                                f"taxonomy merge {commaseperated_taxonmy_merges} raises error:\n{error}\n"
                            )
                            break
            except QueueEmptyError:
                # Catch queue empty error (thrown on timeout)
                continue
        if database_connection and database_connection.closed == 0:
            database_connection.close()
        log_connection.send(f"merge worker {id} is stopping")
        # Close connection to logger
        log_connection.close()
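The wiring for this worker is not shown here; the sketch below is a hypothetical arrangement, with a placeholder database URL and a placeholder merge_batch standing in for a picklable list of TaxonomyMerge objects.

from multiprocessing import Event, Pipe, Process, Queue

if __name__ == "__main__":
    stop_flag = Event()
    queue = Queue()
    log_receiver, log_sender = Pipe(duplex=False)
    worker = Process(target=process_taxonomy_merge,
                     args=(0, "postgres://user:secret@localhost:5432/database",
                           stop_flag, queue, log_sender))
    worker.start()
    queue.put(merge_batch)      # placeholder: a picklable list of TaxonomyMerge objects
    stop_flag.set()             # the worker drains the queue, then exits
    print(log_receiver.recv())  # "merge worker 0 is online"
    worker.join()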
Example #3
 def _run_user_proc(user: pwd.struct_passwd, pipe: Connection) -> None:
     os.setgid(user.pw_gid)
     os.setuid(user.pw_uid)
     os.environ[
         "DBUS_SESSION_BUS_ADDRESS"
     ] = f"unix:path=/run/user/{user.pw_uid}/bus"
     log.debug(f"Subprocess created with uid={os.getuid()} and gid={os.getgid()}")
     try:
         temp_noti: Notify.Notification = Notify.Notification.new("No message.")
         while not pipe.closed:
             log.debug(
                 f"Blocking for new notifications from {pipe} on subprocess for user {user}"
             )
             try:
                 msg = pipe.recv()
             except KeyboardInterrupt:
                 msg = "QUIT"
             if isinstance(msg, Notification):
                 temp_noti.update(msg.summary, msg.body, msg.app_icon)
                 temp_noti.set_hint_byte("urgency", msg.urgency.value)
                 try:
                     temp_noti.show()
                 except GDBusError:
                     log.exception("Failed to show notification.")
             elif isinstance(msg, str) and msg == "QUIT":
                 log.debug("Quit message received. Shutting down.")
                 pipe.close()
                 break
     except EOFError:
         log.debug(f"Pipe closed {pipe} on subprocess for user {user}")
Example #4
    def update(self, conn: Connection, rtsp_url: str, buffer: bool):
        """
        Runs the rtspcam thread to grab data and keep the buffer empty.

        :param conn:  the Pipe to transmit data
        :param rtsp_url: the url of the rtspcam.
        :param buffer:  should the client read frame by frame from the buffer or just grab the latest frame?
        :return: None; runs until END_PROCESS is received
        """
        self.log.info(f"Starting video capture client for {rtsp_url}")
        cap = VideoCapture(rtsp_url, CAP_FFMPEG)
        self.log.info("Started...")
        run = True
        while run:
            if not conn.poll() and not buffer:
                self.log.debug("clearing buffer frame")
                cap.grab()
                continue
            rec_dat = conn.recv()
            if rec_dat == self.SEND_FRAME:
                self.log.debug("Sending next frame to parent process")
                return_value, frame = cap.read()
                conn.send(frame)
            elif rec_dat == self.END_PROCESS:
                self.log.debug("Closing connection")
                cap.release()
                run = False

        self.log.info("Camera Connection Closed")
        conn.close()
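From the parent, this protocol is driven by sending the class's SEND_FRAME/END_PROCESS sentinels. A sketch, assuming the owning object (cam here) is picklable; the RTSP URL is a placeholder.

from multiprocessing import Pipe, Process

def read_one_frame(cam, rtsp_url="rtsp://host/stream"):  # placeholder URL
    parent_conn, child_conn = Pipe()
    p = Process(target=cam.update, args=(child_conn, rtsp_url, False))
    p.start()
    parent_conn.send(cam.SEND_FRAME)   # worker answers with exactly one frame
    frame = parent_conn.recv()
    parent_conn.send(cam.END_PROCESS)  # worker releases the capture and exits
    p.join()
    return frame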
Example #5
def __chunk_wrapper(
    target: Callable[[Any], Any],
    target_kwargs: Dict[str, Any],
    target_name: str,
    connection: Connection,
    chunk_index: int,
    chunk_count: int,
):
    chunk_info = f"{chunk_index + 1}/{chunk_count}"

    try:
        logger.info(f"[{target_name}] Processing chunk {chunk_info}")
        response_data = target(**target_kwargs)
        connection.send(response_data)

        logger.info(f"[{target_name}] Successfully processed"
                    f" chunk {chunk_info}")

    except Exception as error:
        logger.exception(
            f"[{target_name}] Error when processing chunk {chunk_info}")

        connection.send(error)

    finally:
        connection.close()
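The pattern here, sending either the result or the exception object through the pipe and re-raising in the parent, can be exercised in isolation. This runnable sketch uses a stand-in wrapper with the same contract.

from multiprocessing import Pipe, Process

def wrapper(conn, func, kwargs):
    # Same contract as __chunk_wrapper: send the result or the exception.
    try:
        conn.send(func(**kwargs))
    except Exception as error:
        conn.send(error)
    finally:
        conn.close()

def square(x):
    return x * x

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    Process(target=wrapper, args=(child_conn, square, {"x": 7})).start()
    outcome = parent_conn.recv()
    if isinstance(outcome, Exception):
        raise outcome              # surface the worker's failure in the parent
    print(outcome)                 # 49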
Example #6
 def evaluate(self, weight_path: str, sendPipe: connection.Connection, *args, **kwargs):
     '''
     Function: evaluate a single model
     '''
     # Limit GPU memory growth
     gpus = tf.config.experimental.list_physical_devices('GPU')
     if gpus:
         try:
             for gpu in gpus:
                 tf.config.experimental.set_memory_growth(gpu, True)
         except RuntimeError as e:
             print(e)
     # Model configuration
     model = self.model_preparation()
     model = model_cnn_common.model_compile(self.type, model)
     # Load weights
     model.load_weights(weight_path, by_name=False)
     # Prepare the generator
     self.generator_preparation()
     # Prepare the data
     self.data_preparation()
     # Prefetch the data
     self.testsets = self.testsets.prefetch(
         buffer_size=tf.data.AUTOTUNE)
     # Evaluate the model
     loss = model.evaluate(
         self.testsets,
         verbose=1,
         return_dict=True)
     # Send the result back
     sendPipe.send(loss)
     # Matches the main process: make sure the pipe is closed
     sendPipe.close()
Example #7
def report_progress(conn_out: connection.Connection, prefix: str = '') -> None:
    """Tell the user how much work has been done.

    :param prefix: A message to print before the progress prompt, e.g.
        'Progress: '.
    :param conn_out: A multiprocessing ``Connection`` object from which values
        may be read. Each value emitted by the object should be a float between
        0 and 1, inclusive, where 0 indicates 0% complete, and 1 represents
        100% complete. When a value of 1 is received, this function will
        return.
    :return: Nothing.
    """
    while True:
        progress = conn_out.recv()
        # \r is carriage return. The other is an ANSI escape code. See:
        # https://en.wikipedia.org/wiki/ANSI_escape_code
        print(
            '\r\033[K' + prefix + f'{progress * 100:.0f}%',
            end='',
        )
        sys.stdout.flush()
        if progress == 1:
            conn_out.close()
            print()
            break
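The sending side pairs naturally with a one-way pipe. A minimal driver, assuming report_progress above is importable:

from multiprocessing import Pipe, Process

if __name__ == "__main__":
    receiver, sender = Pipe(duplex=False)
    p = Process(target=report_progress, args=(receiver, "Progress: "))
    p.start()
    for step in range(0, 101, 5):
        sender.send(step / 100)    # 0.0 .. 1.0; the worker exits at 1
    p.join()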
Example #8
def run_daemon(qa_pipe: Connection):
    wiki_daemon = WikiDaemon(WIKI_PAGE)
    wiki_daemon.update_wiki_cache()
    wiki_daemon.reload_spacy_docs()
    print("wiki_daemon: Child process started")
    next_wiki_update = time.time() + UPDATE_PERIOD_SECS
    while True:
        now = time.time()
        if next_wiki_update < now:
            print("wiki_daemon: Checking Wikipedia for updates")
            updated = wiki_daemon.update_wiki_cache()

            if updated:
                print("wiki_daemon: Got new revision, updating")

            next_wiki_update = now + UPDATE_PERIOD_SECS

        if qa_pipe.poll(timeout=next_wiki_update - now):
            try:
                question = qa_pipe.recv()
                qa_pipe.send(wiki_daemon.inquiry(question))
            except EOFError:
                # Pipe was closed on other end, we're done here
                qa_pipe.close()
                return
            except ValueError:
                print("Answer was too large to send!")

                # Make sure the caller isn't still waiting for an object
                try:
                    qa_pipe.send("")
                except EOFError:
                    qa_pipe.close()
                    return
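One wiring detail matters for the EOFError path above: after starting the child, the parent must close its own copy of the child's pipe end, or the daemon will never see EOF. A sketch, assuming this module (with its WikiDaemon globals) is importable; the question is hypothetical.

from multiprocessing import Pipe, Process

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=run_daemon, args=(child_conn,))
    p.start()
    child_conn.close()           # drop the parent's copy of the child end
    parent_conn.send("What is the article about?")
    print(parent_conn.recv())
    parent_conn.close()          # run_daemon sees EOFError and returns
    p.join()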
Example #9
def process_loop(
    conn: Connection, user_init: Callable[[], None], loglevel, task, task_args
):
    """Initialize a process pool worker"""

    # Install SIGBUS handler (so our parent process can abort somewhat gracefully)
    with suppress(AttributeError):  # Windows and Cygwin do not have SIGBUS
        signal.signal(signal.SIGBUS, process_sigbus)

    # Reconfigure the root logger for this process to send all messages to a queue
    h = ConnectionLogHandler(conn)
    root = logging.getLogger()
    remove_all_log_handlers(root)
    root.setLevel(loglevel)
    root.addHandler(h)

    user_init()

    for args in task_args:
        try:
            result = task(args)
        except Exception as e:
            conn.send((MessageType.exception, e))
            break
        else:
            conn.send((MessageType.result, result))

    conn.send((MessageType.complete, None))
    conn.close()
    return
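On the parent side, messages tagged with MessageType can be drained until the completion marker arrives. A sketch, assuming MessageType is an Enum with exception/result/complete members as used above:

from enum import Enum, auto

class MessageType(Enum):       # assumed shape, mirroring the tags above
    exception = auto()
    result = auto()
    complete = auto()

def drain_results(conn):
    # Collect results until the worker signals completion; re-raise failures.
    results = []
    while True:
        kind, payload = conn.recv()
        if kind is MessageType.result:
            results.append(payload)
        elif kind is MessageType.exception:
            raise payload
        elif kind is MessageType.complete:
            return results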
Example #10
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=True, parameters=dev_parameters):
            return False

        if cls.ASYNCHRONOUS:
            cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            cls.prepare_sync_send(ctrl_connection)

        exit_requested = False
        buffer_size = cls.CONTINUOUS_SEND_BUFFER_SIZE if send_config.continuous else cls.SEND_BUFFER_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                time.sleep(0.5)
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(cls.__class__.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(cls.__class__.__name__))

        cls.shutdown_device(ctrl_connection)
        ctrl_connection.close()
Example #11
    class RawSocketHandler(socketserver.BaseRequestHandler):
        def setup(self):
            registry.append(self)

        def handle(self):
            self.conn = Connection(self.request.detach())
            while self.conn._handle:
                try:
                    obj = json.loads(self.conn.recv_bytes().decode('utf-8'))
                except EOFError:
                    break
                if obj['type'] == 'message':
                    bus.post(nt_from_dict(Message, obj['message'], None))
                    self.conn.send_bytes(json.dumps({'ret': True}).encode('utf-8'))
                elif obj['type'] == 'request':
                    m = bus.post_sync(nt_from_dict(Message, obj['message'], None))
                    if m:
                        ret = {"ret": True, "response": m._asdict()}
                    else:
                        ret = {"ret": False, "response": None}
                    self.conn.send_bytes(json.dumps(ret).encode('utf-8'))

        def send(self, msg):
            if isinstance(msg, Message):
                ret = {"type": "message", "message": msg._asdict()}
            else:
                ret = {"type": "response", "response": msg._asdict()}
            self.conn.send_bytes(json.dumps(ret).encode('utf-8'))

        def finish(self):
            registry.remove(self)

        def close(self):
            self.conn.close()
Example #12
    def process_taxonomy_deletion(id: int, database_url: str, stop_flag: Event,
                                  queue: Queue, log_connection: Connection):
        """
        Processes taxonomy merges from a queue.

        Parameters
        ----------
        id : int
            Just a id for identifying the process in the logs
        database_url : str
            Database URL, e.g. postgres://username:password@host:port/database
        stop_flag : Event
            Multiprocessing event to stop the processes
        queue : Queue
            Multiprocessing queue for queuing the merges
        log_connection : Connection
            Multiprocessing connection to send logs to the logger
        """
        log_connection.send(f"deletion worker {id} is online")
        database_connection = None
        while not stop_flag.is_set() or not queue.empty():
            if not database_connection or (database_connection and
                                           database_connection.closed != 0):
                database_connection = psycopg2.connect(database_url)
            try:
                # Take a taxonomy id from queue
                taxonomy_id = queue.get(True, 2)
                with database_connection:
                    with database_connection.cursor() as database_cursor:
                        commit_errors = 0
                        # Try to insert it into the database
                        while True:
                            try:
                                database_cursor.execute(
                                    "DELETE FROM taxonomies WHERE id = %s;",
                                    (taxonomy_id, ))
                                database_cursor.execute(
                                    "DELETE FROM taxonomy_merges WHERE source_id = %s OR target_id = %s;",
                                    (taxonomy_id, taxonomy_id))
                                database_connection.commit()
                                break
                            except BaseException as error:
                                database_connection.rollback()
                                commit_errors += 1
                                if commit_errors < 3:
                                    time.sleep(random.randint(2, 5))
                                else:
                                    log_connection.send(
                                        f" taxonomy deletion {taxonomy_id} raises error:\n{error}\n"
                                    )
                                    break
            except QueueEmptyError:
                # Catch queue empty error (thrown on timeout)
                continue
        if database_connection and database_connection.closed == 0:
            database_connection.close()
        log_connection.send(f"deletion worker {id} is stopping")
        # Close connection to logger
        log_connection.close()
Example #13
def close(data, conn: Connection):
    connections.remove(conn)
    for topic in topics.keys():
        if conn in topics[topic]:
            topics[topic].remove(conn)

    if not conn.closed:
        conn.close()
Example #14
def Pconsumer_fcn(Pconn: Connection, loopCondition: Value):
    counter = 0
    while loopCondition.value == 1:
        if Pconn.poll(0.1):
            _ = Pconn.recv()
            counter += 1
    Pconn.close()
    print(f"Consumer msg: Received: {counter} messages\n")
Example #15
def _worker(
    parent: connection.Connection,
    p: connection.Connection,
    env_fn_wrapper: CloudpickleWrapper,
    obs_bufs: Optional[Union[dict, tuple, ShArray]] = None,
) -> None:

    def _encode_obs(
        obs: Union[dict, tuple, np.ndarray], buffer: Union[dict, tuple, ShArray]
    ) -> None:
        if isinstance(obs, np.ndarray) and isinstance(buffer, ShArray):
            buffer.save(obs)
        elif isinstance(obs, tuple) and isinstance(buffer, tuple):
            for o, b in zip(obs, buffer):
                _encode_obs(o, b)
        elif isinstance(obs, dict) and isinstance(buffer, dict):
            for k in obs.keys():
                _encode_obs(obs[k], buffer[k])
        return None

    parent.close()
    env = env_fn_wrapper.data()
    try:
        while True:
            try:
                cmd, data = p.recv()
            except EOFError:  # the pipe has been closed
                p.close()
                break
            if cmd == "step":
                if data is None:  # reset
                    obs = env.reset()
                else:
                    obs, reward, done, info = env.step(data)
                if obs_bufs is not None:
                    _encode_obs(obs, obs_bufs)
                    obs = None
                if data is None:
                    p.send(obs)
                else:
                    p.send((obs, reward, done, info))
            elif cmd == "close":
                p.send(env.close())
                p.close()
                break
            elif cmd == "render":
                p.send(env.render(**data) if hasattr(env, "render") else None)
            elif cmd == "seed":
                p.send(env.seed(data) if hasattr(env, "seed") else None)
            elif cmd == "getattr":
                p.send(getattr(env, data) if hasattr(env, data) else None)
            elif cmd == "setattr":
                setattr(env, data["key"], data["value"])
            else:
                p.close()
                raise NotImplementedError
    except KeyboardInterrupt:
        p.close()
Example #16
def log_data(conn: Connection, cs_logger):
    print('Starting log...')
    while True:
        data = conn.recv()
        if not data:
            conn.close()
            print('Closing...')
            return
        cs_logger.info(data)
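Wiring this up is short; the sketch below assumes log_data above is importable. On spawn-based platforms the child process needs its own logging configuration for the records to appear.

import logging
from multiprocessing import Pipe, Process

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    parent_conn, child_conn = Pipe()
    p = Process(target=log_data, args=(child_conn, logging.getLogger("cs")))
    p.start()
    parent_conn.send("hello from the parent")
    parent_conn.send(None)   # any falsy value makes the worker close and return
    p.join()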
Example #17
def slave_scheduling(context: Context, connection: Connection):
    with ProximalSimulationSlave(connection) as proxy_slave, simdag.Simulation(
            context.env_file, context.task_file) as simulation:
        with proxy_slave.scheduling_scope():
            scheduler = SlaveScheduler(simulation, proxy_slave,
                                       context.feature)
            scheduler.run()
        if context.slave_callback:
            proxy_slave.send(context.slave_callback(simulation))
        connection.close()
Example #18
def environment_worker(
        remote: Connection,
        parent_remote: Connection,
        env_fn_wrapper: callable,
        auto_reset_on_terminal: bool = False,
        render_obs_size_tuple: Optional[Tuple[int, int]] = None,  # (128, 128)
):
    """

    :param remote:
    :param parent_remote:
    :param env_fn_wrapper:
    :param auto_reset_on_terminal:
    :param render_obs_size_tuple:
    :return:"""
    warnings.simplefilter("ignore")
    # with IgnoreInterruptSignal(): # TODO: DOES NOT WORK AS intended here, endless looping, needs another way to send a close signal
    with suppress(UserWarning, KeyboardInterrupt):
        parent_remote.close()
        env = env_fn_wrapper.x()
        terminated = False
        while True:

            cmd, data = remote.recv()
            if cmd is EWC.step:
                observation, signal, terminal, info = env.step(data)
                if terminated:
                    signal = 0
                if terminal:
                    terminated = True
                    if auto_reset_on_terminal:
                        observation = env.reset()
                        terminated = False
                remote.send(GymTuple(observation, signal, terminal, info))
            elif cmd is EWC.reset:
                observation = env.reset()
                terminated = False
                remote.send(observation)
            elif cmd is EWC.close:
                env.close()
                # remote.send(None)
                break
            elif cmd is EWC.get_spaces:
                remote.send((env.observation_space, env.action_space))
            elif cmd is EWC.render:
                res = env.render(data)
                if data != RenderModeEnum.human.value:
                    if render_obs_size_tuple:
                        res = resize(res,
                                     render_obs_size_tuple)  # VERY SLOW!!!
                    remote.send(res)
            elif cmd is EWC.seed:
                env.seed(data)
            else:
                raise NotImplementedError
Example #19
def pyval(expr: str, pipe: Connection) -> None:
    """
    Évalue une expression.
    Retour (sérialisé) via pipe
    """
    try:
        evaluation = safe_eval(expr)
    except BaseException as ex:
        evaluation = ex
    pipe.send(evaluation)
    pipe.close()
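Running the evaluation in a subprocess makes it easy to bound its runtime from the parent. A sketch, assuming pyval above is importable (safe_eval is whatever restricted evaluator the project provides):

from multiprocessing import Pipe, Process

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=pyval, args=("1 + 1", child_conn))
    p.start()
    if parent_conn.poll(timeout=5):        # don't block forever on a hung eval
        outcome = parent_conn.recv()
        if isinstance(outcome, BaseException):
            raise outcome
        print(outcome)                     # 2
    else:
        p.terminate()                      # runaway expression
    p.join()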
Example #20
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig,
                    dev_parameters: OrderedDict):
        if not cls.init_device(
                ctrl_connection, is_tx=True, parameters=dev_parameters):
            ctrl_connection.send("failed to start tx mode")
            return False

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            ret = cls.prepare_sync_send(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start tx mode")
            return False

        exit_requested = False
        buffer_size = cls.CONTINUOUS_TX_CHUNK_SIZE if send_config.continuous else cls.SYNC_TX_CHUNK_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        ctrl_connection.send("successfully started tx mode")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    pass
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(),
                                             ctrl_connection,
                                             is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if not cls.ASYNCHRONOUS:
            # Some Sync send calls (e.g. USRP) are not blocking, so we wait a bit here to ensure
            # that the send buffer on the SDR is cleared
            time.sleep(0.75)

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(
                cls.__class__.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(
                cls.__class__.__name__))

        cls.shutdown_device(ctrl_connection, is_tx=True)
        ctrl_connection.close()
Example #21
def _regym_worker_shared_memory(index: int, env_fn: Callable[[], gym.Env],
                                pipe: Connection, parent_pipe: Connection,
                                shared_memory: Tuple[SynchronizedArray],
                                error_queue: Queue):
    '''
    Based on function `gym.vector.async_vector_env._worker_shared_memory`
    See that function's documentation

    Custom additions:
        - 'environment' command: To return underlying environment
        - 'step' command returns:
            Note: succ_obs dimensions:
            [num_agents, num_environments, environment_observations]
    '''
    assert shared_memory is not None
    env = env_fn()
    observation_space = env.observation_space
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == 'reset':
                observation = env.reset()
                write_to_shared_memory(index, observation, shared_memory,
                                       observation_space)
                pipe.send((None, True))
            elif command == 'step':
                observation, reward, done, info = env.step(data)
                if done:
                    observation = env.reset()
                write_to_shared_memory(index, observation, shared_memory,
                                       observation_space)
                pipe.send(((None, reward, done, info), True))
            elif command == 'seed':
                env.seed(data)
                pipe.send((None, True))
            elif command == 'close':
                pipe.send((None, True))
                break
            elif command == 'environment':
                pipe.send((env, True))
            elif command == '_check_observation_space':
                pipe.send((data == observation_space, True))
            else:
                raise RuntimeError(
                    'Received unknown command `{0}`. Must '
                    'be one of {{`reset`, `step`, `seed`, `close`, `environment`, '
                    '`_check_observation_space`}}.'.format(command))
    except (KeyboardInterrupt, Exception):
        error_queue.put((index, ) + sys.exc_info()[:2])
        pipe.send((None, False))
    finally:
        env.close()
Example #22
def receive(conn: Connection, _pipeline: Callable[[Iterator[dict]], Iterator]):
    """
    Процедура получения строки через сетевое соединение и обратной передачи данных нормализации
    """
    sentence = ''
    stemming.init_cache()
    tokenization.init_cache()
    config.init_cache()

    try:
        with stemming.jstem_ctx() as stemmer:
            while True:
                if conn.poll(timeout=_RTN_CONNECTION_LIFE_TIME):
                    sentence = conn.recv()

                    logger.debug(f'Received: "{sentence}"')

                    if _PROFILER:
                        profiler.enable()

                    start = process_time()
                    try:
                        analysis = normalization.analyze(sentence, stemmer)
                        result = list(_pipeline(analysis))
                    except Exception as e:
                        logger.error(f'Normalization failed with error: {e} \n {sentence}')
                        result = []
                    end = process_time()

                    if _PROFILER:
                        profiler.disable()

                    conn.send(result)
                    logger.debug(f'Sent: {result}')
                    perflog.debug(f'RTN time: {round((end-start) * 1000, 2)} ms')

                    if _PROFILER:
                        profiler.dump_stats(os.path.join(settings.ROOT_PATH, f'rtn_{time()}.cprof'))
                else:
                    raise TimeoutError
    except EOFError:
        logger.info('Incoming connection closed')
    except TimeoutError:
        logger.warning('Incoming connection timeout')
    except Exception:
        logger.exception(sentence if sentence else 'Normalization Failed')
    finally:
        stemming.cache_clear()
        tokenization.cache_clear()
        config.cache_clear()
        logger.debug('Closing connection...')
        conn.close()
        logger.debug('Connection closed')
Example #23
 def unprocessible_proteins_worker(process_connections: list, unprocessible_proteins_fasta_path: pathlib.Path, log_connection: Connection):
     log_connection.send("unprocessible proteins logger is online")
     with unprocessible_proteins_fasta_path.open("w") as fasta_file:
         while process_connections:
             for conn in wait(process_connections):
                 try:
                     fasta_entry = conn.recv()
                 except EOFError:
                     process_connections.remove(conn)
                 else:
                     fasta_file.write(fasta_entry + "\n")
                     fasta_file.flush()
     log_connection.send("unprocessible proteins logger is stopping")
     log_connection.close()
Example #24
    def run(conn: Connection) -> None:
        """
        Static Process Function.

        When a new Backend process is started, it enters here.
        Currently, it creates a WorkQueue with a worker thread
        and communicates with the BQSKit Frontend via conn.

        Args:
            conn (Connection): The connection object used to communicate
                to the frontend. Reads commands from the connection,
                processes them, and responds in a loop.
        """

        wq = WorkQueue()

        while True:

            msg = conn.recv()

            if msg == 'CLOSE':
                wq.stop()
                conn.close()
                break

            elif msg == 'SUBMIT':
                task = conn.recv()
                if not isinstance(task, CompilationTask):
                    pass  # TODO: Handle Error
                wq.enqueue(task)
                conn.send('OKAY')

            elif msg == 'STATUS':
                task_id = conn.recv()
                if not isinstance(task_id, uuid.UUID):
                    pass  # TODO: Handle Error
                conn.send(wq.status(task_id))

            elif msg == 'RESULT':
                task_id = conn.recv()
                if not isinstance(task_id, uuid.UUID):
                    pass  # TODO: Handle Error
                conn.send(wq.result(task_id))

            elif msg == 'REMOVE':
                task_id = conn.recv()
                if not isinstance(task_id, uuid.UUID):
                    pass  # TODO: Handle Error
                wq.remove(task_id)
                conn.send('OKAY')
Example #25
File: Device.py Project: jopohl/urh
    def device_send(cls, ctrl_connection: Connection, send_config: SendConfig, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=True, parameters=dev_parameters):
            ctrl_connection.send("failed to start tx mode")
            return False

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_send_mode(send_config.get_data_to_send)
        else:
            ret = cls.prepare_sync_send(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start tx mode")
            return False

        exit_requested = False
        buffer_size = cls.CONTINUOUS_TX_CHUNK_SIZE if send_config.continuous else cls.SYNC_TX_CHUNK_SIZE
        if not cls.ASYNCHRONOUS and buffer_size == 0:
            logger.warning("Send buffer size is zero!")

        ctrl_connection.send("successfully started tx mode")

        while not exit_requested and not send_config.sending_is_finished():
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    pass
            else:
                cls.send_sync(send_config.get_data_to_send(buffer_size))

            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=True)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        if not cls.ASYNCHRONOUS:
            # Some Sync send calls (e.g. USRP) are not blocking, so we wait a bit here to ensure
            # that the send buffer on the SDR is cleared
            time.sleep(0.75)

        if exit_requested:
            logger.debug("{}: exit requested. Stopping sending".format(cls.__class__.__name__))
        if send_config.sending_is_finished():
            logger.debug("{}: sending is finished.".format(cls.__class__.__name__))

        cls.shutdown_device(ctrl_connection, is_tx=True)
        ctrl_connection.close()
Example #26
        def log_timer(receiver: Connection, sender: Connection):
            print('downloading ', end='', flush=True)
            sender.close()

            # start ticker
            self.ticker.start()

            while True:
                if receiver.poll():
                    done = receiver.recv()
                    if done:
                        print(flush=True)
                        print('download done', flush=True)

                        # stop ticker when download process done
                        self.ticker.cancel()
                        break
Example #27
def runner(env: Env,
           conn: Connection,
           reset_args: tuple = (),
           reset_kwargs: dict = {}):
    while True:
        action = conn.recv()
        if action == EnvStatus.RESET:
            state = env.reset(*reset_args, **reset_kwargs)
            conn.send(state)
        elif action == EnvStatus.END:
            conn.close()
            break
        else:
            state, reward, done, _ = env.step(action)
            if done:
                state = env.reset(*reset_args, **reset_kwargs)
            conn.send((state, reward, done, _))
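Driving this loop from the parent looks like the sketch below; EnvStatus is assumed to be a plain Enum (so an integer action never compares equal to EnvStatus.RESET), and make_env is a hypothetical factory whose environment must be picklable.

from multiprocessing import Pipe, Process

if __name__ == "__main__":
    env = make_env()                   # hypothetical factory; env must pickle
    parent_conn, child_conn = Pipe()
    p = Process(target=runner, args=(env, child_conn))
    p.start()
    parent_conn.send(EnvStatus.RESET)
    state = parent_conn.recv()
    parent_conn.send(0)                # an ordinary action -> env.step(0)
    state, reward, done, _ = parent_conn.recv()
    parent_conn.send(EnvStatus.END)    # runner closes its end and breaks
    p.join()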
Example #28
def env_worker(conn: Connection, **env_kwargs):
    gameplay = Ccreate_start_conditions()
    env = Haxball(gameplay=gameplay, **env_kwargs)
    i = 0
    while True:
        cmd, data = conn.recv()

        if cmd == 'step':
            a1, a2 = data
            env.step_async(a1, red_team=True)
            env.step_async(a2, red_team=False)

            env.step_physics()

            obss = []
            rews = []
            dones = []
            infos = []
            is_done = False

            for red_team in (True, False):
                obs, rew, done, info = env.step_wait(red_team=red_team)
                obss.append(obs)
                rews.append(rew)
                dones.append(done)
                infos.append(info)
                is_done |= done
            if is_done:
                env.reset()

            res = np.array(obss), np.array(rews), np.array(dones), np.array(
                infos)
            conn.send(res)
        elif cmd == 'reset':
            ob = env.reset()
            conn.send([ob, ob])
        elif cmd == 'render':
            res = env.render(mode='rgb_array')
            conn.send(res)
        elif cmd == 'close':
            conn.close()
            break
        elif cmd == 'get_spaces_spec':
            conn.send((env.observation_space, env.action_space, env.spec))
        else:
            raise NotImplementedError
Example #29
 def process_taxonomy_merge(id: int, database_url: str, stop_flag: Event, queue: Queue, log_connection: Connection):
     log_connection.send(f"merge worker {id} is online")
     # Create database connection
     engine = create_engine(database_url, pool_size = 1, max_overflow = 0, pool_timeout = 3600)
     SessionClass = sessionmaker(bind = engine, autoflush=False)
     while not stop_flag.is_set() or not queue.empty():
         # Open new session
         session = SessionClass()
         try:
             # Take a taxonomy from queue
             taxonomy_merge = queue.get(True, 2)
             # Check if the merge already exists
             existing_taxonomy_merge = session.query(TaxonomyMerge).filter(TaxonomyMerge.source_id == taxonomy_merge.source_id, TaxonomyMerge.target_id == taxonomy_merge.target_id).one_or_none()
             if not existing_taxonomy_merge:
                 commit_errors = 0
                 # Try to insert it into the database
                 while True:
                     try:
                         # Insert merge
                         session.add(taxonomy_merge)
                         # Update all taxonomy id of proteins which have the old id.
                         session.query(Protein).filter(Protein.taxonomy_id == taxonomy_merge.source_id).update({Protein.taxonomy_id: taxonomy_merge.target_id})
                         session.commit()
                         break
                     except BaseException as error:
                         # Do a rollback
                         session.rollback()
                         commit_errors += 1
                         # If there are tries left, sleep between 2 and 5 seconds and try again
                         if commit_errors < 3:
                             time.sleep(random.randint(2, 5))
                         else:
                             # Otherwise log the error and proceed with next taxonomy
                             log_connection.send(f" taxonomy merge {taxonomy_merge.source_id}|{taxonomy_merge.target_id} raises error:\n{error}\n")
                             break
         except QueueEmptyError:
             # Catch queue empty error (thrown on timeout)
             continue
         # Close session
         session.close()
     # Dispose all connection within the engine
     engine.dispose()
     log_connection.send(f"merge worker {id} is stopping")
     # Close connection to logger
     log_connection.close()
Example #30
 def _start(self, factory: Callable[[], Env], connection: Connection):
     env = factory()
     _ = env.reset()
     connection.send((env.observation_space, env.action_space))
     terminate = False
     while not terminate:
         command, kwargs = connection.recv()
         if command == 'render':
             rendering = env.render(**kwargs)
             connection.send(rendering)
         elif command == 'step':
             step = env.step(**kwargs)
             connection.send(step)
         elif command == 'reset':
             obs = env.reset(**kwargs)
             connection.send(obs)
         elif command == 'close':
             terminate = True
             connection.close()
Example #31
File: Device.py Project: jopohl/urh
    def device_receive(cls, data_connection: Connection, ctrl_connection: Connection, dev_parameters: OrderedDict):
        if not cls.init_device(ctrl_connection, is_tx=False, parameters=dev_parameters):
            ctrl_connection.send("failed to start rx mode")
            return False

        try:
            cls.adapt_num_read_samples_to_sample_rate(dev_parameters[cls.Command.SET_SAMPLE_RATE.name])
        except NotImplementedError:
            # Many SDRs like HackRF or AirSpy do not need to calculate SYNC_RX_CHUNK_SIZE
            # as default values are either fine or given by the hardware
            pass

        if cls.ASYNCHRONOUS:
            ret = cls.enter_async_receive_mode(data_connection, ctrl_connection)
        else:
            ret = cls.prepare_sync_receive(ctrl_connection)

        if ret != 0:
            ctrl_connection.send("failed to start rx mode")
            return False

        exit_requested = False
        ctrl_connection.send("successfully started rx mode")

        while not exit_requested:
            if cls.ASYNCHRONOUS:
                try:
                    time.sleep(0.25)
                except KeyboardInterrupt:
                    pass
            else:
                cls.receive_sync(data_connection)
            while ctrl_connection.poll():
                result = cls.process_command(ctrl_connection.recv(), ctrl_connection, is_tx=False)
                if result == cls.Command.STOP.name:
                    exit_requested = True
                    break

        cls.shutdown_device(ctrl_connection, is_tx=False)
        data_connection.close()
        ctrl_connection.close()
Example #32
 def statistics_worker(stop_flag: Event, statistics: Array,
                       write_period: int,
                       statistics_file_path: pathlib.Path,
                       log_connection: Connection):
     log_connection.send("statistics logger is online")
     # Snapshot of last written statistic to calculate difference
     last_statistics = [0] * len(statistics)
     start_at = time.time()
     Digestion.print_statistic_row(Digestion.STATSTIC_FILE_HEADER)
     with statistics_file_path.open("w") as statistics_file:
         statistics_writer = csv.writer(statistics_file)
         statistics_writer.writerow(Digestion.STATSTIC_FILE_HEADER)
         statistics_file.flush()
         while not stop_flag.is_set():
             # Prepare array for next write
             current_statistics = []
             # Wait for next write
             stop_flag.wait(write_period)
             # Calculate seconds after start
             current_time = int(time.time() - start_at)
             # Get current statistics
             for value in statistics:
                 current_statistics.append(value)
             # Initialize csv row
             csv_row = [current_time]
             # Assign current statistics to csv row
             for value in current_statistics:
                 csv_row.append(value)
             # Calculate differences from the last statistics (= rates)
             for idx in range(0, len(current_statistics)):
                 csv_row.append(current_statistics[idx] -
                                last_statistics[idx])
             # Write csv row to csv file
             statistics_writer.writerow(csv_row)
             statistics_file.flush()
             # Output to console
             Digestion.print_statistic_row(csv_row)
             # Assign new 'snapshot'
             last_statistics = current_statistics
     log_connection.send("statistics logger is offline")
     log_connection.close()
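A hypothetical harness for this logger: workers bump the shared Array while this process snapshots it every write_period seconds (here 5, with a placeholder CSV path; Digestion is assumed to be the surrounding class).

import pathlib
from multiprocessing import Array, Event, Pipe, Process

if __name__ == "__main__":
    stop_flag = Event()
    statistics = Array('i', 3)         # e.g. counters maintained by the workers
    log_receiver, log_sender = Pipe(duplex=False)
    p = Process(target=Digestion.statistics_worker,
                args=(stop_flag, statistics, 5,
                      pathlib.Path("statistics.csv"), log_sender))
    p.start()
    with statistics.get_lock():
        statistics[0] += 1             # a worker reporting progress
    stop_flag.set()
    print(log_receiver.recv())         # "statistics logger is online"
    p.join()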
Example #33
def _subproc(conn: Connection, remote_conn: Connection, fn_path: str) -> None:
    remote_conn.close()

    with open(fn_path, "rb") as f:
        env = cloudpickle.load(f)()

    # notify if it's ready
    conn.send("ready")

    while True:
        command = conn.recv()
        if command[0] == "step":
            observation, reward, terminal, info = env.step(command[1])
            conn.send([observation, reward, terminal, info])
        elif command[0] == "reset":
            conn.send([env.reset()])
        elif command[0] == "close":
            conn.close()
            break
        else:
            raise ValueError(f"invalid {command[0]}.")
Example #34
 def _handle(self, conn_lru_dict: LRUCacheType[multiprocessing_connection.Connection, bool],
             conn: multiprocessing_connection.Connection, c_send: multiprocessing_connection.Connection):
     try:
         data = conn.recv_bytes()
         if not data:
             raise EOFError
         self.logger.debug("parse conn %s" % conn)
         # self.logger.debug(data)
         try:
             result = self.handle(data)
             if result is not None:
                 conn.send_bytes(result)
         except Exception:
             self.logger.exception("handle error")
         conn_lru_dict[conn] = True
         c_send.send_bytes(b'ok')
     except OSError:
         self.logger.debug("conn %s was closed" % conn)
         conn.close()
     except EOFError:
         self.logger.debug("conn %s was eof" % conn)
         conn.close()
     except BrokenPipeError:
         self.logger.debug("conn %s was broken" % conn)
         conn.close()