Example #1
    def run_dispatcher() -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        thread_send_queue: "ThreadQueue[LayoutUpdate]" = ThreadQueue()
        async_recv_queue: "AsyncQueue[LayoutEvent]" = AsyncQueue()

        async def send_coro(value: Any) -> None:
            thread_send_queue.put(value)

        async def recv_coro() -> Any:
            return await async_recv_queue.get()

        async def main() -> None:
            async with make_dispatcher() as dispatcher:
                await dispatcher.run(send_coro, recv_coro, context)

        main_future = asyncio.ensure_future(main())

        dispatch_thread_info_ref.current = _DispatcherThreadInfo(
            dispatch_loop=loop,
            dispatch_future=main_future,
            thread_send_queue=thread_send_queue,
            async_recv_queue=async_recv_queue,
        )
        dispatch_thread_info_created.set()

        loop.run_until_complete(main_future)
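
A note on the pattern above: the dispatcher owns a private event loop on its own thread, so other threads must never touch `async_recv_queue` directly. A minimal sketch of how the launching thread might talk to the dispatcher once `dispatch_thread_info_created` is set (the names `info` and `event` are illustrative, not from the source):

    # hypothetical caller running on another thread
    info = dispatch_thread_info_ref.current

    # hand an event to the dispatcher: the asyncio queue belongs to the
    # dispatcher's loop, so it must be fed via call_soon_threadsafe
    info.dispatch_loop.call_soon_threadsafe(
        info.async_recv_queue.put_nowait, event)

    # receive the next layout update: thread_send_queue is a plain
    # queue.Queue, so a blocking get() is safe from any thread
    update = info.thread_send_queue.get()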
Example #2
    def __call__(self, num_steps: int, num_episodes: int):

        if not os.path.exists(self.folder):
            os.mkdir(self.folder)

        filename = os.path.join(self.folder, self.__class__.__name__ + '_' +
                                '_'.join([solver.NAME for solver in self.solvers]) +
                                f"_{num_steps}steps_{num_episodes}episodes")

        for episode in range(1, num_episodes + 1):
            for solver in self.solvers:
                self.job_queue.put((episode, solver))

        plotter_queue = ThreadQueue()
        # start monitor
        monitor = Thread(target=self.monitor, args=(plotter_queue, filename, num_steps, num_episodes))
        monitor.start()

        # start job processor
        processors = []
        for _ in range(self.ncpu):
            processors.append(Process(target=self.processor, args=(num_steps,)))
            processors[-1].start()

        self.plotter(self.solvers, plotter_queue, num_steps, filename)

        # wait for processors to quit
        for p in processors:
            p.join()
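
Note the division of labor above: jobs fan out to `ncpu` worker processes, a monitor thread feeds `plotter_queue`, and `plotter` runs on the calling thread itself; this is the usual arrangement when the plotter drives a GUI backend such as matplotlib, which generally must run on the main thread (compare Example #11, where `plotter` is followed by `plt.show()`).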
Example #3
File: flask.py Project: idom-team/idom
    def run_dispatcher() -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        thread_send_queue: "ThreadQueue[LayoutUpdate]" = ThreadQueue()
        async_recv_queue: "AsyncQueue[LayoutEvent]" = AsyncQueue()

        async def send_coro(value: Any) -> None:
            thread_send_queue.put(value)

        async def recv_coro() -> Any:
            return await async_recv_queue.get()

        async def main() -> None:
            await serve_json_patch(
                idom.Layout(
                    ConnectionContext(component,
                                      value=Connection(request, websocket,
                                                       path))),
                send_coro,
                recv_coro,
            )

        main_future = asyncio.ensure_future(main())

        dispatch_thread_info_ref.current = _DispatcherThreadInfo(
            dispatch_loop=loop,
            dispatch_future=main_future,
            thread_send_queue=thread_send_queue,
            async_recv_queue=async_recv_queue,
        )
        dispatch_thread_info_created.set()

        loop.run_until_complete(main_future)
Example #4
 def __init__(self):
     self._request = ThreadQueue(1024)
     self._notify, self._wakeup = socketpair()
     self._notify.setblocking(False)
     self._wakeup.setblocking(False)
     self._worker = Thread(target=self.asyncio_worker)
     self._worker.start()
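
The socketpair above is the standard trick for waking an asyncio loop from another thread: the payload travels through the ThreadQueue, while a single byte on the notify socket unblocks the worker. A minimal sketch of the submitting side, assuming a method like this exists (the name `submit` is hypothetical):

 def submit(self, item):
     # hypothetical: enqueue the request, then poke the wakeup socket
     # so the asyncio worker stops waiting on _wakeup
     self._request.put(item)
     try:
         self._notify.send(b'\x00')
     except BlockingIOError:
         pass  # send buffer full: the worker already has pending wakeups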
Example #5
    def __init__(self, amqp_url, routing_key, publish_tempo_sec,
                 exchange_name):
        '''Setup the example publisher object, passing in the URL we will use
        to connect to RabbitMQ.

        :param str amqp_url: The URL for connecting to RabbitMQ
        '''
        self._channel = None
        self._connection = None

        self._acked = 0
        self._nacked = 0
        self._deliveries = []
        self._message_number = 0

        self._closing = False
        self._stopping = False
        self.connect_error = False

        self._amqp_url = amqp_url
        self._task_run_event = ThreadEvent()
        self._publish_tempo_sec = publish_tempo_sec
        self._thread_queue = ThreadQueue(maxsize=500)

        self._tempo_controller = QueueToSampleTimeControl(
            i_max=1 / self.PUBLISH_FAST_INTERVAL_SEC, dt=publish_tempo_sec)

        # will set the exchange, queue and routing_keys names for the RabbitMq
        # server running on amqp_url
        self._rabbit_exchange_name = exchange_name
        self._rabbit_routing_key = routing_key
Example #6
    def __init__(self, host, vhost, callback=None, threadsize=1):

        self.init_connect(host, vhost)
        self.callback = callback
        self.queue = ThreadQueue()
        self.producer = ServerProducer(self.connection)
        self.initRecv(threadsize)
        self.startThread()
Example #7
    def __init__(self, host, vhost, username, password, nodeName, callback):
        self.init_connect(host, vhost, username, password)
        self.nodeName = nodeName
        self.callback = callback
        self.queue = ThreadQueue()
        self.producer = ClientProducer(self.connection, 'HOST')
        self.initRecv()

        self.startThread()
Example #8
def get_task_queue(cpu_count):
    """
        Gets the queue based on cpu count
    """
    if cpu_count == 1:
        t = ThreadQueue()
        t.close = lambda: None
        return t

    return Queue()
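
The `t.close = lambda: None` patch is duck typing: `multiprocessing.Queue` has a `close()` method while `queue.Queue` does not, so adding a no-op lets callers shut down either queue through the same interface. A sketch of the call site this enables (hypothetical usage):

    q = get_task_queue(cpu_count)
    q.put(task)
    q.close()  # no-op on a ThreadQueue, real cleanup on a multiprocessing Queue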
Example #9
 def __init__(self, handler, max_num_thread=None, max_num_process=None):
     self._handler = handler
     self._thread_executor = ThreadPoolExecutor(max_num_thread)
     self._process_executor = ProcessPoolExecutor(max_num_process)
     self._asyncio_executor = AsyncioExecutor()
     self._notify, self._wakeup = socketpair()
     self._queue = ThreadQueue(1024)
     self.agent_task = None
     self._is_exiting = False
     self._notify_lock = ThreadLock()
Example #10
 def _start_thread(self, more_sample_content):
     pool = ThreadPool(len(more_sample_content))
     self._thread_queue = ThreadQueue()
     sample_data = [(index, sample) for index, sample in enumerate(more_sample_content)]
     pool.apply_async(self._thread_task, args=(sample_data, ))
     pool.close()
     pool.join()
     result = list()
     while not self._thread_queue.empty():
         result.append(self._thread_queue.get())
         self._thread_queue.task_done()
     result = map(lambda item: item[1], sorted(result, key=lambda item: item[0]))
     return list(result)
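
The `(index, sample)` tagging above exists because threads finish in arbitrary order: `sorted(..., key=lambda item: item[0])` restores the input order before the payloads are extracted. A minimal sketch of the worker side, assuming `_thread_task` tags each result with the index it received (`self._process` is a hypothetical stand-in for the real per-sample work):

 def _thread_task(self, sample_data):
     # hypothetical worker: keep each sample's index with its result so
     # the caller can sort the collected items back into input order
     for index, sample in sample_data:
         self._thread_queue.put((index, self._process(sample)))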
Example #11
    def load(cls, filepath):
        # start plotter

        with open(filepath, 'rb') as reader:
            num_steps, pickled_solvers_length = struct.unpack('II', reader.read(8))
            solvers = pickle.loads(reader.read(pickled_solvers_length))

            title_length = struct.unpack('I', reader.read(4))[0]
            cls.TITLE = reader.read(title_length).decode()
            plotter_queue = ThreadQueue()
            helper = Thread(target=cls._load_helper, args=(reader, plotter_queue, num_steps, solvers))
            helper.start()
            cls.plotter(solvers, plotter_queue, num_steps, None)
            cls.TITLE = ''

        plt.show()
Example #12
File: events.py Project: Fadesml/ccam
async def start_face_recognition_service(database: Database,
                                         bot: SimpleLongPollBot) -> None:
    # queue for exchanging images between the watcher thread and this coroutine
    queue = ThreadQueue()
    # async wrapper over the blocking queue, so reads don't block the event loop
    async_queue = AsyncQueue(queue)

    watcher = Watcher(DELAY_FOR_SEND_TO_VK, PATH_TO_IMAGES)
    # run the blocking camera check in a separate thread, keeping the event loop free
    Thread(target=watcher.run, args=(queue, CAMERA_ID)).start()

    mailer = VkMailer(database, bot)
    while True:
        # receive images placed on the queue by the watcher thread
        images = await async_queue.get()
        await mailer.notify_users(images, ALERT_TEXT)
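
The `AsyncQueue` wrapper itself is not shown in this example. A minimal sketch of what such a wrapper typically looks like, assuming it offloads the blocking `get()` to the default executor so the event loop stays responsive (an assumption, not the project's actual class):

    import asyncio

    class AsyncQueue:
        def __init__(self, queue):
            self._queue = queue

        async def get(self):
            # run the blocking queue.Queue.get in a worker thread
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(None, self._queue.get)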
Example #13
    def __init__(self,
                 name: str,
                 path: Optional[str] = None,
                 dtypes: Optional[dict] = None,
                 fmt: Optional[str] = None,
                 chunk_size: int = 0,
                 stream_type: str = "stream",
                 store: bool = False):
        """
        Creates a Stream object and attempts to build its dtypes.
        If store flag is false, spawns a thread that polls the Stream queue for incoming data

        Args:
            name: The name of the collector the stream is associated with
            path: The directory path the stream should store to (Usage dictated by super classes)
            dtypes: A dictionary containing the dtypes that the stream data DataFrame has
            fmt: The file format that the stream data should be stored as

                * `fmt="parquet"` (recommended) stores the DataFrame using parquet format
                * `fmt="json"` stores the DataFrame using JSON format
                * `fmt="pickle"` stores the DataFrame using pickle format
                * `fmt="csv"` stores the DataFrame using csv format

            chunk_size: The size of chunks to send to the stream target. I.e. Insert data in chunks of chunk_size rows
            stream_type: The type of the stream (Primarily used for logging)
            store: This flag indicates whether or not the stream is being called by a Store object

        Raises:
            NotImplementedError: chunk_storage is not yet implemented.
        """
        super().__init__(name=name, dtypes=dtypes, path=path, fmt=fmt)
        if chunk_size != 0:
            raise NotImplementedError("Chunk storage not yet implemented")
        # FIXME: Implement support for sharded uploads
        self.chunk_size = 0
        self.stream_type = stream_type
        self.is_store_instance = store
        """bool: True if the super-class has base-class Store otherwise False"""
        # If the superclass is a Store instance, do not create a lock/queue
        if store:
            return
        self.lock = ThreadLock()
        """threading.Lock: Stream Lock used to signal if the stream has died"""
        self.q = ThreadQueue()
        """queue.Queue: Stream queue used to pass data into"""
Example #14
from archive import Archive


if __name__ == '__main__':

    logger = logging.getLogger('Log')
    logger.setLevel(logging.DEBUG)
    formatter = '%(asctime)s %(module)s(%(lineno)s):%(funcName)s [%(levelname)s]: %(message)s'
    streamhandler = logging.StreamHandler()
    streamhandler.setFormatter(logging.Formatter(formatter, datefmt='%H:%M:%S'))
    streamhandler.setLevel(logging.DEBUG)
    logger.addHandler(streamhandler)
    logger.debug('Session start')

    mpqueue = MPQueue()  # queue for multiprocessing
    threadqueue = ThreadQueue()  # queue for threading
    aqueue = MPQueue()  # for archiver

    ws = WSServer(port=Constants.wsport)
    ws.start()

    wd = WebServer(name='Flask')
    wd.start()

    serialReceiver: Dict[str, dict] = {
        'sGPS': {
            'port': '/dev/ttyACM0',
            'baud': 9600,
        },
        'sAIS': {
            'port': '/dev/ttyUSB0',
Example #15
    def start(self, timeout: float = 10.0) -> None:
        """
        Start the processes and queues.

        Args:
            timeout:

        Returns:

        """

        # reset the events
        self.global_abort_event.clear()
        self.local_abort_event.clear()
        self.synchronized_stop.clear()

        if self.pin_memory_queue is None:
            self.pin_memory_queue = ThreadQueue(
                self.max_queue_size_pin_thread_per_worker * self.nb_workers)

        if self.nb_workers == 0:
            # nothing to do, this will be executed synchronously on
            # the main process
            return

        if len(self.processes) != self.nb_workers:
            print(
                f'Starting jobExecutor={self}, on process={os.getpid()} nb_workers={self.nb_workers}'
            )
            logging.debug(
                f'Starting jobExecutor={self}, on process={os.getpid()} nb_workers={self.nb_workers}'
            )
            if len(self.processes) > 0 or len(self.pin_memory_threads) > 0:
                self.close()
            self.local_abort_event.clear()

            # first create the worker input/output queues
            for i in range(self.nb_workers):  # maxsize = 0
                self.worker_input_queues.append(
                    Queue(maxsize=self.max_queue_size_per_worker))
                self.worker_output_queues.append(
                    Queue(self.max_queue_size_per_worker))

            # allocate one thread per process to move the data from the
            # process memory space to the main process memory
            self.pin_memory_threads = []
            self.pin_memory_thread_stop_events = []
            for i in range(self.nb_workers):
                # stop event is used to notify the pinning thread
                # to stop its processing (e.g., so that it could be restarted)
                stop_event = Event()
                self.pin_memory_thread_stop_events.append(stop_event)

                pin_memory_thread = threading.Thread(
                    name=f'JobExecutorThreadResultCollector-{i}',
                    target=collect_results_to_main_process,
                    args=(self.job_session_id, self.jobs_processed,
                          self.worker_output_queues[i], self.pin_memory_queue,
                          self.global_abort_event, self.local_abort_event,
                          self.synchronized_stop, stop_event, self.wait_time))
                self.pin_memory_threads.append(pin_memory_thread)
                pin_memory_thread.daemon = False
                pin_memory_thread.start()
                print(f'Thread={pin_memory_thread.ident}, thread started')

            # make sure a single process can use only one thread
            with threadpool_limits(limits=1, user_api='blas'):
                for i in range(self.nb_workers):
                    p = Process(
                        target=worker,
                        name=f'JobExecutorWorker-{i}',
                        args=(self.worker_input_queues[i],
                              self.worker_output_queues[i],
                              self.function_to_run, self.global_abort_event,
                              self.local_abort_event, self.synchronized_stop,
                              self.wait_time, i))
                    p.daemon = False
                    p.start()
                    self.processes.append(p)
                    print(f'Worker={p.pid} started!')
                    logging.debug(
                        f'Child process={p.pid} for jobExecutor={self}')

            self.worker_control = 0

        if self.wait_until_processes_start:
            # wait until all the processes and threads are alive
            waiting_started = perf_counter()
            while True:
                wait_more = False
                for p in self.processes:
                    if not p.is_alive():
                        wait_more = True
                        continue
                for t in self.pin_memory_threads:
                    if not t.is_alive():
                        wait_more = True
                        continue

                if wait_more:
                    waiting_time = perf_counter() - waiting_started
                    if waiting_time < timeout:
                        sleep(self.wait_time)
                        continue  # re-check until everything is alive or we time out
                    logging.error(
                        'the worker processes/pin threads were too slow to start!'
                    )

                break

        logging.debug(f'jobExecutor ready={self}')
Example #16
def execute_graph_async(dep_graph: Dict,
                        output_nodes,
                        replace_by_persistent_ids=True,
                        executor=None,
                        max_payload=None):
    # default to a small thread pool when no executor is supplied
    executor = executor or ThreadPool(4)
    max_payload = max_payload or multiprocessing.cpu_count()
    if isinstance(executor, ThreadPool):
        queue = ThreadQueue()
    else:
        raise RuntimeError('only ThreadPool executors are supported')

    reversed_mapping = {}
    if replace_by_persistent_ids:
        persistent_graph = {}

        for node_id, node_task in dep_graph.items():
            new_id = get_persistent_id(node_id)
            dependencies = list(map(get_persistent_id, node_task.dependencies))
            persistent_graph[new_id] = GraphTask(node_task.evaluate,
                                                 dependencies)
            reversed_mapping[new_id] = node_id

        dep_graph = persistent_graph
        output_nodes = [get_persistent_id(o) for o in output_nodes]

    cache = dict()
    state = build_execution_state(dep_graph, cache=cache)

    def deploy_task():
        current_task_id = state.ready.pop()
        state.running.add(current_task_id)
        current_task = dep_graph[current_task_id]
        cur_data = [state.cache[dep] for dep in current_task.dependencies]
        executor.apply_async(execute_task,
                             args=(current_task_id, current_task.evaluate,
                                   cur_data),
                             callback=queue.put)

    def cleanup_waiting_tasks():
        while len(state.running) != 0:
            t_id, _, _ = queue.get()
            process_finished_task(t_id, state, output_nodes)

        for t_id in list(state.waiting) + list(state.ready):
            dep_graph[t_id].cleanup()

    # deploy initial tasks
    while state.ready and len(state.running) < max_payload:
        deploy_task()

    # main execution loop
    while state.waiting or state.ready or state.running:
        task_id, result, failed = queue.get()
        if failed:
            exc = result
            if dep_graph[task_id].rerun_locally:
                warnings.warn(f'Try to rerun task {task_id}', RuntimeWarning)
                # try to rerun task if necessary
                task = dep_graph[task_id]
                try:
                    data = [state.cache[dep] for dep in task.dependencies]
                    result = task.evaluate(data)
                except BaseException as e:
                    # inform waiting tasks
                    process_finished_task(task_id, state, output_nodes)
                    cleanup_waiting_tasks()
                    raise e

            else:
                # inform waiting tasks
                process_finished_task(task_id, state, output_nodes)
                cleanup_waiting_tasks()
                raise exc

        state.cache[task_id] = result
        process_finished_task(task_id, state, output_nodes)
        while state.ready and len(state.running) < max_payload:
            deploy_task()

    result = dict((reversed_mapping.get(name, name), state.cache[name])
                  for name in output_nodes)
    return result
Example #17
    def start(self, timeout: float = 10.0) -> None:
        """
        Start the processes and queues.

        Args:
            timeout:

        Returns:

        """
        if self.pin_memory_queue is None:
            self.pin_memory_queue = ThreadQueue(
                self.max_queue_size_pin_thread_per_worker * self.nb_workers)

        if self.nb_workers == 0:
            # nothing to do, this will be executed synchronously on
            # the main process
            return

        if len(self.processes) != self.nb_workers:
            logging.debug(
                f'Starting jobExecutor={self}, on process={os.getpid()} nb_workers={self.nb_workers}'
            )
            self.close()
            self.abort_event.clear()

            with threadpool_limits(limits=1, user_api='blas'):
                for i in range(self.nb_workers):  #maxsize = 0
                    self.worker_input_queues.append(
                        Queue(maxsize=self.max_queue_size_per_worker))
                    self.worker_output_queues.append(
                        Queue(self.max_queue_size_per_worker))

                    p = Process(target=worker,
                                name=f'JobExecutorWorker-{i}',
                                args=(self.worker_input_queues[i],
                                      self.worker_output_queues[i],
                                      self.function_to_run, self.abort_event,
                                      self.wait_time, i))
                    p.daemon = True
                    p.start()
                    self.processes.append(p)
                    logging.debug(
                        f'Child process={p.pid} for jobExecutor={self}')

            self.pin_memory_threads = []
            for i in range(self.nb_pin_threads):
                pin_memory_thread = threading.Thread(
                    name=f'JobExecutorThreadResultCollector-{i}',
                    target=collect_results_to_main_process,
                    args=(self.job_session_id, self.jobs_processed,
                          self.worker_output_queues, self.pin_memory_queue,
                          self.abort_event, self.wait_time))
                self.pin_memory_threads.append(pin_memory_thread)
                pin_memory_thread.daemon = True
                pin_memory_thread.start()

            self.worker_control = 0

        if self.wait_until_processes_start:
            # wait until all the processes and threads are alive
            waiting_started = perf_counter()
            while True:
                wait_more = False
                for p in self.processes:
                    if not p.is_alive():
                        wait_more = True
                        continue
                for t in self.pin_memory_threads:
                    if not t.is_alive():
                        wait_more = True
                        continue

                if wait_more:
                    waiting_time = perf_counter() - waiting_started
                    if waiting_time < timeout:
                        sleep(self.wait_time)
                        continue  # re-check until everything is alive or we time out
                    logging.error(
                        'the worker processes/pin threads were too slow to start!'
                    )

                break
        logging.debug(f'jobExecutor ready={self}')
Example #18
 def __init__(self):
     super(DistributorThread, self).__init__(ThreadQueue(), ThreadEvent())
Example #19
 def __init__(self, connection):
     self.queue_names_lock = threading.Lock()
     self.queue_names = []
     self.connection = connection
     self.active = False
     self.output_queue = ThreadQueue()
Example #20
 def __init__(self, handler):
     WrapperHandler.__init__(self, handler)
     self.queue = ThreadQueue(-1)  # maxsize <= 0 means an unbounded queue
     self.controller = TWHThreadController(self)
     self.controller.start()
Example #21
 def __init__(self, subscribers=None, queue_limit=10):
     self.members = []
     self.queue = ThreadQueue(queue_limit)
     for subscriber in subscribers or []:
         self.add(subscriber)
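
A sketch of the dispatch side such a fan-out usually pairs with (the `publish` and `run` methods, and the subscriber's `receive` call, are hypothetical names):

 def publish(self, message):
     # blocks once queue_limit messages are pending, applying
     # backpressure to producers
     self.queue.put(message)

 def run(self):
     while True:
         message = self.queue.get()
         for subscriber in self.members:
             subscriber.receive(message)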