Example #1
    def __init__(self,
                 client,
                 group,
                 topic,
                 auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1,
                 partitions_per_proc=0):

        # Initiate the base consumer class
        super(MultiProcessConsumer,
              self).__init__(client,
                             group,
                             topic,
                             partitions=None,
                             auto_commit=auto_commit,
                             auto_commit_every_n=auto_commit_every_n,
                             auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = MPQueue(1024)  # Child consumers dump messages into this
        self.start = Event()  # Signals the consumers to start fetching
        self.exit = Event()  # Requests the consumers to shut down
        self.pause = Event()  # Requests the consumers to pause fetching
        self.size = Value('i', 0)  # Indicator of number of messages to fetch

        partitions = list(self.offsets.keys())

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks; zip_longest pads the last chunk with None
        # so that no partition is dropped (Python 2's map() did this padding
        # implicitly; Python 3 needs itertools.zip_longest)
        chunks = list(zip_longest(*[iter(partitions)] * int(partitions_per_proc)))

        self.procs = []
        for chunk in chunks:
            chunk = [x for x in chunk if x is not None]
            args = (client.copy(), group, topic, chunk, self.queue, self.start,
                    self.exit, self.pause, self.size)

            proc = Process(target=_mp_consume, args=args)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)
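A minimal, self-contained sketch of the coordination pattern this constructor sets up: children dump messages into a shared MPQueue and poll shared Events to know when to start, pause, or shut down. The worker body below is a stand-in, not the real _mp_consume, and all Kafka specifics are omitted.

from multiprocessing import Event, Process, Queue as MPQueue

def _consume(worker_id, queue, start, exit_evt):
    start.wait()  # block until the master signals the fetch start
    while not exit_evt.is_set():
        queue.put((worker_id, 'message'))
        exit_evt.wait(0.1)  # simulate fetch latency

if __name__ == '__main__':
    queue, start, exit_evt = MPQueue(1024), Event(), Event()
    procs = [Process(target=_consume, args=(i, queue, start, exit_evt))
             for i in range(2)]
    for p in procs:
        p.daemon = True
        p.start()
    start.set()         # let the consumers begin fetching
    print(queue.get())  # the master drains messages the children dumped
    exit_evt.set()      # request shutdown
    for p in procs:
        p.join()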
Example #2
    def _start(self):
        if len(self._threads) == 0:
            logging.debug("starting workers")
            self._queue_loop = 0
            self._end_ctr = 0

            for i in range(self.num_processes):
                self._queues.append(MPQueue(self.num_cached_per_queue))
                self._threads.append(Process(target=producer, args=(self._queues[i], self.generator, self.transform, i, self.seeds[i])))
                self._threads[-1].daemon = True
                self._threads[-1].start()
        else:
            logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running")
Example #3
    def run_multiple_ant_colony_system(self, file_to_write_path=None):
        """
        开启另外的线程来跑multiple_ant_colony_system, 使用主线程来绘图
        :return:
        """
        path_queue_for_figure = MPQueue()
        multiple_ant_colony_system_thread = Process(target=self._multiple_ant_colony_system, args=(path_queue_for_figure, file_to_write_path, ))
        multiple_ant_colony_system_thread.start()

        # whether to show the figure
        if self.whether_or_not_to_show_figure:
            figure = VrptwAcoFigure(self.graph.nodes, path_queue_for_figure)
            figure.run()
        multiple_ant_colony_system_thread.join()
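The same pattern in miniature: the solver runs in a child process and streams results through an MPQueue while the main process consumes them. The solver body here is a hypothetical stand-in for _multiple_ant_colony_system.

from multiprocessing import Process, Queue as MPQueue

def solver(path_queue):
    for step in range(3):
        path_queue.put([0, step, 0])  # report an improved path
    path_queue.put(None)              # sentinel: no more paths

if __name__ == '__main__':
    q = MPQueue()
    p = Process(target=solver, args=(q,))
    p.start()
    for path in iter(q.get, None):    # main process "draws" each path
        print('new best path:', path)
    p.join()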
Example #4
    def test_read_data_multiprocessing(self):
        set_start_method("spawn")
        workers = 1
        output_queue = MPQueue()
        processes = [
            Process(target=run_read, args=[output_queue])
            for i in range(workers)
        ]
        [process.start() for process in processes]
        [process.join() for process in processes]
        for i in range(workers):
            data = output_queue.get(timeout=0.1)
            assert_that(data["frame"][0][0][0], is_(equal_to(97)))
            assert_that(data["frame"][0][0][1], is_(equal_to(98)))
            assert_that(data["frame"][0][0][2], is_(equal_to(99)))
            assert_that(data["test1"], is_(equal_to(1)))
            assert_that(data["test2"], is_(equal_to(2)))
Example #5
    def _start(self):
        self._queue = MPQueue(self.num_cached)

        def producer(queue, generator):
            try:
                for item in generator:
                    queue.put(item)
            except Exception:
                print("oops...")
            finally:
                queue.put("end")

        for _ in range(self.num_processes):
            np.random.seed()
            self._threads.append(Process(target=producer, args=(self._queue, self.generator)))
            self._threads[-1].daemon = True
            self._threads[-1].start()
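A consumer matching this producer has to drain the shared queue until it has seen one "end" sentinel per worker process. A sketch, assuming the same attributes as above:

    def __iter__(self):
        ends_seen = 0
        while ends_seen < self.num_processes:
            item = self._queue.get()
            if item == "end":
                ends_seen += 1
            else:
                yield item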
Example #6
    def __init__(self, buddy_configuration, logger):
        """
        Initialize the object.
        """

        self.__configuration__ = buddy_configuration
        self.__logger__ = logger
        self.__lcd__ = None
        self.__lcd_status_id__ = 0
        self.__initialize_lcd__()
        self.__is_gas_detected__ = False
        self.__system_start_time__ = datetime.datetime.now()
        self.__sensors__ = Sensors(buddy_configuration)

        serial_connection = self.__initialize_modem__()
        if serial_connection is None and not local_debug.is_debug():
            self.__logger__.log_warning_message(
                "Unable to initialize serial connection, quiting.")
            sys.exit()

        self.__fona_manager__ = FonaManager(
            self.__logger__, serial_connection,
            self.__configuration__.cell_power_status_pin,
            self.__configuration__.cell_ring_indicator_pin,
            self.__configuration__.utc_offset)

        # create heater relay instance
        self.__relay_controller__ = RelayManager(
            buddy_configuration, logger, self.__heater_turned_on_callback__,
            self.__heater_turned_off_callback__,
            self.__heater_max_time_off_callback__)
        self.__gas_sensor_queue__ = MPQueue()

        self.__logger__.log_info_message(
            "Starting SMS monitoring and heater service")
        self.__clear_existing_messages__()

        self.__logger__.log_info_message("Begin monitoring for SMS messages")
        self.__queue_message_to_all_numbers__(
            "HangarBuddy monitoring started." + "\n" +
            self.__get_help_status__())
        self.__queue_message_to_all_numbers__(self.__get_full_status__())
        self.__lcd__.clear()
        self.__lcd__.write(0, 0, "Ready")
Example #7
    def crawl(self,
              proc=None,
              domain_depth=0,
              crawl_depth=0,
              page_limit=None,
              wait_courtesy=0,
              html2txt=False,
              metas=None):
        """
        :param proc:           number of processes to spawn; 0 or None runs in the current process
        :param domain_depth:   crawling depth for each seed element (inside the original domain)
        :param crawl_depth:    crawling depth for each seed element (outside the original domain)
        :param page_limit:     maximum number of pages to crawl
        :param wait_courtesy:  time in seconds between each fetch
        :param html2txt:       whether resulting pages are raw HTML (default) or cleaned text
        :param metas:          metas we want to extract during crawling
        """

        self.domain_depth = domain_depth
        self.crawl_depth = crawl_depth
        self.page_limit = page_limit
        # lazy loading, so we know whether the seeds need multiprocessing support
        if self.seed is None:
            if self.filename is not None:
                self.seed = Seed(f=self.filename,
                                 multiproc=not (proc is None or proc == 0))
            elif self.seedlist is not None:
                self.seed = Seed(s=self.seedlist,
                                 multiproc=not (proc is None or proc == 0))

        if proc is None or proc == 0:
            self.storage = Queue()  # Will contain shared crawl results
            self._sub_crawl(self.seed.q, self.storage, Event(), wait_courtesy,
                            html2txt, metas, None)
            while True:
                try:
                    el = self.storage.get(block=False)
                    yield el
                except Empty:
                    break
        else:
            self.storage = MPQueue()  # Will contain shared crawl results
            yield from self.spawn_crawl_processes(html2txt, metas, proc,
                                                  wait_courtesy)
Example #8
    def __init__(self, multiprocess=False, is_deque=True, max_len=200):
        if multiprocess:
            is_deque = False

        self._queue = None
        self._size = 0  # qsize() might not be implemented for MP queue
        self._use_deque = is_deque
        self._use_mpqueue = multiprocess
        if self._use_deque:
            if max_len == 0:
                max_len = None
            self._queue = deque(maxlen=max_len)
        else:
            if max_len is None:
                max_len = 0
            if self._use_mpqueue:
                self._queue = MPQueue(maxsize=max_len)
            else:
                self._queue = Queue(maxsize=max_len)
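A quick sketch of the three backends this constructor can select; the wrapper's class name is assumed to be BufferQueue here:

local = BufferQueue(is_deque=True, max_len=0)        # unbounded collections.deque
bounded = BufferQueue(multiprocess=False,
                      is_deque=False, max_len=10)    # queue.Queue(maxsize=10)
shared = BufferQueue(multiprocess=True)              # multiprocessing queue (maxsize=200 by default)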
Example #9
    def __init__(self, configuration, logger, heater_on_callback,
                 heater_off_callback, heater_max_time_callback):
        """ Initialize the object. """

        self.__configuration__ = configuration
        self.__logger__ = logger
        self.__on_callback__ = heater_on_callback
        self.__off_callback__ = heater_off_callback
        self.__max_time_callback__ = heater_max_time_callback

        # create heater relay instance
        self.__heater_relay__ = PowerRelay("heater_relay",
                                           configuration.heater_pin)
        self.__heater_queue__ = MPQueue()

        # create queue to hold heater timer.
        self.__heater_shutoff_timer__ = None

        # make sure and turn heater off
        self.__heater_relay__.switch_low()
Example #10
    def __init__(self):

        super().__init__()
        self.daemon = True
        self.name = 'Collector'

        self.inputQueue = Queue()
        self.outputQueue = MPQueue()
        self.receiver: Dict[str, Receiver] = {}
        self.f450 = Format450()

        for a in range(1, 17):
            name = 'CH%02d' % a
            ip = '239.192.0.%d' % a
            port = 60000 + a
            t = Receiver(name=name,
                         params=Antenna(ip=ip,
                                        port=port,
                                        streamType=StreamType.Type450),
                         qp=self.inputQueue)
            self.receiver[name] = t
Example #11
    def __init__(self,
                 vname_to_line_config_dict=None,
                 ylim=(0, 70),
                 window_area=50,
                 update_interval=0.01):
        '''
        :param vname_to_line_config_dict:
        dict keyed by variable name, with a config list as the value
        :param ylim:
        :param window_area:   how many historical points to display, so x does not grow without bound
        :param update_interval:
        '''
        self.y_buff = 10
        self.update_interval = update_interval
        self.ylim = ylim
        self.window_area = window_area
        self.queue = MPQueue()
        self.vname_to_lines = {}
        self.vname_to_line_config_dict = vname_to_line_config_dict
        self.vname_to_data = []
Example #12
def test_tracer_usage_multiprocess():
    q = MPQueue()

    # Similar to test_multiprocess(), ensures that no collisions are
    # generated between parent and child processes while using
    # multiprocessing.

    # Note that we have to be wary of the size of the underlying
    # pipe in the queue: https://bugs.python.org/msg143081

    def target(q):
        ids_list = list(
            chain.from_iterable(
                (s.span_id, s.trace_id)
                for s in [tracer.start_span("s") for _ in range(10)]))
        q.put(ids_list)

    ps = [mp.Process(target=target, args=(q, )) for _ in range(30)]
    for p in ps:
        p.start()

    for p in ps:
        p.join()

    ids_list = list(
        chain.from_iterable(
            (s.span_id, s.trace_id)
            for s in [tracer.start_span("s") for _ in range(100)]))
    ids = set(ids_list)
    assert len(ids) == len(ids_list), "Collisions found in ids"

    while not q.empty():
        child_ids_list = q.get()
        child_ids = set(child_ids_list)

        assert len(child_ids) == len(
            child_ids_list), "Collisions found in subprocess ids"

        assert ids & child_ids == set()
        ids = ids | child_ids  # accumulate the ids
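The pipe-size caveat referenced above is the standard multiprocessing pitfall: a child that has put more data than the underlying pipe can buffer will not exit until something drains the queue, so results must be collected before joining. A minimal illustration:

import multiprocessing as mp

def big_put(q):
    q.put(list(range(100_000)))  # likely larger than the pipe buffer

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=big_put, args=(q,))
    p.start()
    data = q.get()  # drain first; joining before this get() can deadlock
    p.join()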
Example #13
    def __init__(self, *, path: str, timeout: int = 5, buffersize: int = 32):
        super().__init__()
        self.daemon = False
        self.name = 'SQLite'
        self.live: bool = True

        self.qp: MPQueue = MPQueue()
        self.path = pathlib.Path(path)  # path for *.db
        self.nameformat: str = '%04d-%02d-%02d.db'
        self.dateformat: str = '%Y-%m-%d %H:%M:%S.%f'
        self.locker = Lock()

        self.counter: int = 0
        now = dt.now()
        self.lastat: dt = now
        self.lastsave: dt = now
        self.timeout = timeout
        # self.buffer: List[Record] = []
        self.buffersize = buffersize

        self.fifo = Queue()

        self.schema = 'CREATE TABLE "sentence" ( \
Example #14
    def __init__(self, logger, serial_connection, power_status_pin,
                 ring_indicator_pin):

        self.__logger__ = logger
        self.__modem_access_lock__ = threading.Lock()
        self.serial_connection = serial_connection
        self.power_status_pin = power_status_pin
        self.ring_indicator_pin = ring_indicator_pin

        if self.serial_connection is not None:
            self.serial_connection.flushInput()
            self.serial_connection.flushOutput()

        self.__send_command__("AT")
        # self.send_command("AE0")
        self.__disable_verbose_errors__()
        self.__set_sms_mode__()

        self.__read_from_fona__(10)

        self.__message_waiting_queue__ = MPQueue()
        self.__initialize_gpio_pins__()
        self.__poll_for_messages__()
Example #15
    def __init__(self,
                 *,
                 serialPort: str = '',
                 baudrate: int = 0,
                 mcip: str = '',
                 mcport: int = 0,
                 outQueue: queue.Queue,
                 infoQueue: queue.Queue):

        self.useVDO = False

        self.fragment = []
        self.seq = 0

        self.engine = Dispatcher()

        self.quePoint = MPQueue()
        self.counter = 0

        self.outQueue = outQueue
        self.infoQueue = infoQueue

        self.w = Thread(target=self.welcome, daemon=True)
        self.w.start()

        # if serialPort:
        #     logger.debug('+++ use Serial')
        #     self.serialPort = serialPort
        #     self.baudrate = baudrate
        #     self.s = Thread(target=self.fromSerial, daemon=True)
        #     self.s.start()

        if mcip:
            logger.debug('+++ use UDP(multicast)')
            self.u = fromUDP(mcip=mcip, mcport=mcport, quePoint=self.quePoint)
            self.u.start()
Example #16
    def __init__(
        self,
        roidb,
        num_loaders=4,
        minibatch_queue_size=64,
        blobs_queue_capacity=8,
        num_augmentation_processes=8,
    ):
        self._roidb = roidb
        for roi in roidb:
            roi.pop('dataset', None)  # pop the reference to prevent duplication
        self._lock = threading.Lock()
        self._perm = deque(range(len(self._roidb)))
        self._cur = 0  # _perm cursor
        self._counter = 0
        # The minibatch queue holds prepared training data in host (CPU) memory
        # When training with N > 1 GPUs, each element in the minibatch queue
        # is actually a partial minibatch which contributes 1 / N of the
        # examples to the overall minibatch
        self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
        self._minibatch_queue_mp = MPQueue(minibatch_queue_size)

        self._blobs_queue_capacity = blobs_queue_capacity
        # Random queue name in case one instantiates multiple RoIDataLoaders
        self._loader_id = uuid.uuid4()
        self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
        # Loader threads construct (partial) minibatches and put them on the
        # minibatch queue
        self._num_loaders = num_loaders
        self._num_gpus = cfg.NUM_GPUS
        self.coordinator = Coordinator()

        self._output_names = get_minibatch_blob_names()
        self._shuffle_roidb_inds()
        self.create_threads()
        self.num_augmentation_processes = num_augmentation_processes
Example #17
def test_tracer_usage_fork():
    q = MPQueue()
    pid = os.fork()

    # Similar test to test_fork() above except we use the tracer API.
    # In this case we expect to never have collisions.
    if pid > 0:
        # parent
        parent_ids_list = list(
            chain.from_iterable(
                (s.span_id, s.trace_id)
                for s in [tracer.start_span("s") for _ in range(100)]))
        parent_ids = set(parent_ids_list)
        assert len(parent_ids) == len(
            parent_ids_list), "Collisions found in parent process ids"

        child_ids_list = q.get()

        child_ids = set(child_ids_list)

        assert len(child_ids) == len(
            child_ids_list), "Collisions found in child process ids"
        assert parent_ids & child_ids == set()
    else:
        # child
        try:
            child_ids = list(
                chain.from_iterable(
                    (s.span_id, s.trace_id)
                    for s in [tracer.start_span("s") for _ in range(100)]))
            q.put(child_ids)
        finally:
            # Kill the process so it doesn't continue running the rest of the
            # test suite in a separate process. Note we can't use sys.exit()
            # as it raises an exception that pytest will detect as an error.
            os._exit(0)
Example #18
def test_fork_pid_check():
    q = MPQueue()
    pid = os.fork()

    # Generate random numbers in the parent and child processes after forking.
    # The child sends back their numbers to the parent where we check to see
    # if we get collisions or not.
    if pid > 0:
        # parent
        rns = {_rand.rand64bits() for _ in range(100)}
        child_rns = q.get()

        assert rns & child_rns == set()

    else:
        # child
        try:
            rngs = {_rand.rand64bits() for _ in range(100)}
            q.put(rngs)
        finally:
            # Kill the process so it doesn't continue running the rest of the
            # test suite in a separate process. Note we can't use sys.exit()
            # as it raises an exception that pytest will detect as an error.
            os._exit(0)
Example #19
    def run_real(self):
        self.shared_memory = SharedMemory(create=True,
                                          size=self.max_shared_memory)
        self.log.debug(
            f'Created shared memory of size: {self.shared_memory.size / 1024 / 1024:.02f} MiB'
        )

        # create the shared memory segments and add them to their respective pools
        for i in range(
                int(self.shared_memory.size / self.analysis.biggest_chunk)):
            _sms = SharedMemorySegment(offset=i * self.analysis.biggest_chunk,
                                       end=i * self.analysis.biggest_chunk +
                                       self.analysis.biggest_chunk)
            self.sms.append(_sms)

        self.log.debug(f'Created {len(self.sms)} shared memory segments.')

        # Create queues
        self.dl_worker_queue = MPQueue(-1)
        self.writer_queue = MPQueue(-1)
        self.dl_result_q = MPQueue(-1)
        self.writer_result_q = MPQueue(-1)

        self.log.info(f'Starting download workers...')
        for i in range(self.max_workers):
            w = DLWorker(f'DLWorker {i + 1}',
                         self.dl_worker_queue,
                         self.dl_result_q,
                         self.shared_memory.name,
                         logging_queue=self.logging_queue,
                         dl_timeout=self.dl_timeout)
            self.children.append(w)
            w.start()

        self.log.info('Starting file writing worker...')
        writer_p = FileWorker(self.writer_queue, self.writer_result_q,
                              self.dl_dir, self.shared_memory.name,
                              self.cache_dir, self.logging_queue)
        self.children.append(writer_p)
        writer_p.start()

        num_chunk_tasks = sum(isinstance(t, ChunkTask) for t in self.tasks)
        num_dl_tasks = len(self.chunks_to_dl)
        num_tasks = len(self.tasks)
        num_shared_memory_segments = len(self.sms)
        self.log.debug(
            f'Chunks to download: {num_dl_tasks}, File tasks: {num_tasks}, Chunk tasks: {num_chunk_tasks}'
        )

        # active downloader tasks
        self.active_tasks = 0
        processed_chunks = 0
        processed_tasks = 0
        total_dl = 0
        total_write = 0

        # synchronization conditions
        shm_cond = Condition()
        task_cond = Condition()
        self.conditions = [shm_cond, task_cond]

        # start threads
        s_time = time.time()
        self.threads.append(
            Thread(target=self.download_job_manager,
                   args=(task_cond, shm_cond)))
        self.threads.append(
            Thread(target=self.dl_results_handler, args=(task_cond, )))
        self.threads.append(
            Thread(target=self.fw_results_handler, args=(shm_cond, )))

        for t in self.threads:
            t.start()

        last_update = time.time()

        while processed_tasks < num_tasks:
            delta = time.time() - last_update
            if not delta:
                time.sleep(self.update_interval)
                continue

            # update all the things
            processed_chunks += self.num_processed_since_last
            processed_tasks += self.num_tasks_processed_since_last

            total_dl += self.bytes_downloaded_since_last
            total_write += self.bytes_written_since_last

            dl_speed = self.bytes_downloaded_since_last / delta
            dl_unc_speed = self.bytes_decompressed_since_last / delta
            w_speed = self.bytes_written_since_last / delta
            r_speed = self.bytes_read_since_last / delta
            # c_speed = self.num_processed_since_last / delta

            # set temporary counters to 0
            self.bytes_read_since_last = self.bytes_written_since_last = 0
            self.bytes_downloaded_since_last = self.num_processed_since_last = 0
            self.bytes_decompressed_since_last = self.num_tasks_processed_since_last = 0
            last_update = time.time()

            perc = (processed_chunks / num_chunk_tasks) * 100
            runtime = time.time() - s_time
            total_avail = len(self.sms)
            total_used = (num_shared_memory_segments - total_avail) * (
                self.analysis.biggest_chunk / 1024 / 1024)

            if runtime and processed_chunks:
                rt_hours, runtime = int(runtime // 3600), runtime % 3600
                rt_minutes, rt_seconds = int(runtime // 60), int(runtime % 60)

                average_speed = processed_chunks / runtime
                estimate = (num_chunk_tasks - processed_chunks) / average_speed
                hours, estimate = int(estimate // 3600), estimate % 3600
                minutes, seconds = int(estimate // 60), int(estimate % 60)
            else:
                hours = minutes = seconds = 0
                rt_hours = rt_minutes = rt_seconds = 0

            # rounds the numbers to the base
            def round_spec(x, base):
                return round(x * (base / 100))

            # returns the progress bar
            def pounds(perc, rest):
                num_pounds = round_spec(perc, rest)
                return ('#' * num_pounds) + (' ' * (rest - num_pounds))

            term_length = shutil.get_terminal_size().columns
            # base message
            message = f"{perc:.02f}%"
            stats = f"{hours:02d}:{minutes:02d}:{seconds:02d} {dl_speed / 1024 / 1024:.02f} MiB/s {total_dl / 1024 / 1024:.02f} MiB "

            # some old math I used to use, leaving it here for reference
            # rest = term_length - len(message) - ((round(shutil.get_terminal_size().columns / 2) + 2) - len(stats)) # Gets how many whitespaces we have to leave

            # I have no idea how this math works...
            rest = round((term_length - len(message) - len(stats)) /
                         3) - 1  # gets how many whitespaces we have to leave
            # adds as many spaces to the message as we need...
            message += ' ' * rest
            message += stats
            message += f"[{pounds(perc, (rest * 2))}]"

            # clears the line, and prints the message
            sys.stdout.write("\r" + message)
            # flushes stdout
            sys.stdout.flush()

            # send status update to back to instantiator (if queue exists)
            if self.status_queue:
                try:
                    self.status_queue.put(UIUpdate(progress=perc,
                                                   download_speed=dl_unc_speed,
                                                   write_speed=w_speed,
                                                   read_speed=r_speed,
                                                   memory_usage=total_used *
                                                   1024 * 1024),
                                          timeout=1.0)
                except Exception as e:
                    self.log.warning(
                        f'Failed to send status update to queue: {e!r}')

            time.sleep(self.update_interval)

        for i in range(self.max_workers):
            self.dl_worker_queue.put_nowait(DownloaderTask(kill=True))

        self.log.info('Waiting for installation to finish...')
        self.writer_queue.put_nowait(WriterTask('', kill=True))

        writer_p.join(timeout=10.0)
        if writer_p.exitcode is None:
            self.log.warning(f'Terminating writer process, no exit code!')
            writer_p.terminate()

        # forcibly kill DL workers that are not actually dead yet
        for child in self.children:
            if child.exitcode is None:
                child.terminate()

        # make sure all the threads are dead.
        for t in self.threads:
            t.join(timeout=5.0)
            if t.is_alive():
                self.log.warning(f'Thread did not terminate! {repr(t)}')

        # clean up resume file
        if self.resume_file:
            try:
                os.remove(self.resume_file)
            except OSError as e:
                self.log.warning(f'Failed to remove resume file: {e!r}')

        # close up shared memory
        self.shared_memory.close()
        self.shared_memory.unlink()
        self.shared_memory = None

        self.log.info('All done! Download manager quitting...')
        # finally, exit the process.
        exit(0)
Example #20
    def run_real(self):
        self.shared_memory = SharedMemory(create=True, size=self.max_shared_memory)
        self.log.debug(f'Created shared memory of size: {self.shared_memory.size / 1024 / 1024:.02f} MiB')

        # create the shared memory segments and add them to their respective pools
        for i in range(int(self.shared_memory.size / self.analysis.biggest_chunk)):
            _sms = SharedMemorySegment(offset=i * self.analysis.biggest_chunk,
                                       end=i * self.analysis.biggest_chunk + self.analysis.biggest_chunk)
            self.sms.append(_sms)

        self.log.debug(f'Created {len(self.sms)} shared memory segments.')

        # Create queues
        self.dl_worker_queue = MPQueue(-1)
        self.writer_queue = MPQueue(-1)
        self.dl_result_q = MPQueue(-1)
        self.writer_result_q = MPQueue(-1)

        self.log.info(f'Starting download workers...')
        for i in range(self.max_workers):
            w = DLWorker(f'DLWorker {i + 1}', self.dl_worker_queue, self.dl_result_q,
                         self.shared_memory.name, logging_queue=self.logging_queue,
                         dl_timeout=self.dl_timeout)
            self.children.append(w)
            w.start()

        self.log.info('Starting file writing worker...')
        writer_p = FileWorker(self.writer_queue, self.writer_result_q, self.dl_dir,
                              self.shared_memory.name, self.cache_dir, self.logging_queue)
        self.children.append(writer_p)
        writer_p.start()

        num_chunk_tasks = sum(isinstance(t, ChunkTask) for t in self.tasks)
        num_dl_tasks = len(self.chunks_to_dl)
        num_tasks = len(self.tasks)
        num_shared_memory_segments = len(self.sms)
        self.log.debug(f'Chunks to download: {num_dl_tasks}, File tasks: {num_tasks}, Chunk tasks: {num_chunk_tasks}')

        # active downloader tasks
        self.active_tasks = 0
        processed_chunks = 0
        processed_tasks = 0
        total_dl = 0
        total_write = 0

        # synchronization conditions
        shm_cond = Condition()
        task_cond = Condition()
        self.conditions = [shm_cond, task_cond]

        # start threads
        s_time = time.time()
        self.threads.append(Thread(target=self.download_job_manager, args=(task_cond, shm_cond)))
        self.threads.append(Thread(target=self.dl_results_handler, args=(task_cond,)))
        self.threads.append(Thread(target=self.fw_results_handler, args=(shm_cond,)))

        for t in self.threads:
            t.start()

        last_update = time.time()

        while processed_tasks < num_tasks:
            delta = time.time() - last_update
            if not delta:
                time.sleep(self.update_interval)
                continue

            # update all the things
            processed_chunks += self.num_processed_since_last
            processed_tasks += self.num_tasks_processed_since_last

            total_dl += self.bytes_downloaded_since_last
            total_write += self.bytes_written_since_last

            dl_speed = self.bytes_downloaded_since_last / delta
            dl_unc_speed = self.bytes_decompressed_since_last / delta
            w_speed = self.bytes_written_since_last / delta
            r_speed = self.bytes_read_since_last / delta
            # c_speed = self.num_processed_since_last / delta

            # set temporary counters to 0
            self.bytes_read_since_last = self.bytes_written_since_last = 0
            self.bytes_downloaded_since_last = self.num_processed_since_last = 0
            self.bytes_decompressed_since_last = self.num_tasks_processed_since_last = 0
            last_update = time.time()

            perc = (processed_chunks / num_chunk_tasks) * 100
            runtime = time.time() - s_time
            total_avail = len(self.sms)
            total_used = (num_shared_memory_segments - total_avail) * (self.analysis.biggest_chunk / 1024 / 1024)

            if runtime and processed_chunks:
                rt_hours, runtime = int(runtime // 3600), runtime % 3600
                rt_minutes, rt_seconds = int(runtime // 60), int(runtime % 60)

                average_speed = processed_chunks / runtime
                estimate = (num_chunk_tasks - processed_chunks) / average_speed
                hours, estimate = int(estimate // 3600), estimate % 3600
                minutes, seconds = int(estimate // 60), int(estimate % 60)
            else:
                hours = minutes = seconds = 0
                rt_hours = rt_minutes = rt_seconds = 0

            bar.set_fraction(perc / 100)  # set_fraction expects a 0.0-1.0 fraction; perc is a percentage
            bar.set_text(f'{perc:.02f}% ({processed_chunks}/{num_chunk_tasks}), '
                         f'Elapsed: {rt_hours:02d}:{rt_minutes:02d}:{rt_seconds:02d}, '
                         f'ETA: {hours:02d}:{minutes:02d}:{seconds:02d}, '
                         f'{dl_speed / 1024 / 1024:.02f} MiB/s')

            #self.log.info(f'= Progress: {perc:.02f}% ({processed_chunks}/{num_chunk_tasks}), '
            #              f'Running for {rt_hours:02d}:{rt_minutes:02d}:{rt_seconds:02d}, '
            #              f'ETA: {hours:02d}:{minutes:02d}:{seconds:02d}')
            #self.log.info(f' - Downloaded: {total_dl / 1024 / 1024:.02f} MiB, '
            #              f'Written: {total_write / 1024 / 1024:.02f} MiB')
            #self.log.info(f' - Cache usage: {total_used} MiB, active tasks: {self.active_tasks}')
            #self.log.info(f' + Download\t- {dl_speed / 1024 / 1024:.02f} MiB/s (raw) '
            #              f'/ {dl_unc_speed / 1024 / 1024:.02f} MiB/s (decompressed)')
            #self.log.info(f' + Disk\t- {w_speed / 1024 / 1024:.02f} MiB/s (write) / '
            #              f'{r_speed / 1024 / 1024:.02f} MiB/s (read)')

            # send status update to back to instantiator (if queue exists)
            if self.status_queue:
                try:
                    self.status_queue.put(UIUpdate(
                        progress=perc, download_speed=dl_unc_speed, write_speed=w_speed, read_speed=r_speed,
                        memory_usage=total_used * 1024 * 1024
                    ), timeout=1.0)
                except Exception as e:
                    self.log.warning(f'Failed to send status update to queue: {e!r}')

            time.sleep(self.update_interval)

        for i in range(self.max_workers):
            self.dl_worker_queue.put_nowait(DownloaderTask(kill=True))

        self.log.info('Waiting for installation to finish...')
        self.writer_queue.put_nowait(WriterTask('', kill=True))

        writer_p.join(timeout=10.0)
        if writer_p.exitcode is None:
            self.log.warning(f'Terminating writer process, no exit code!')
            writer_p.terminate()

        # forcibly kill DL workers that are not actually dead yet
        for child in self.children:
            if child.exitcode is None:
                child.terminate()

        # make sure all the threads are dead.
        for t in self.threads:
            t.join(timeout=5.0)
            if t.is_alive():
                self.log.warning(f'Thread did not terminate! {repr(t)}')

        # clean up resume file
        if self.resume_file:
            try:
                os.remove(self.resume_file)
            except OSError as e:
                self.log.warning(f'Failed to remove resume file: {e!r}')

        # close up shared memory
        self.shared_memory.close()
        self.shared_memory.unlink()
        self.shared_memory = None

        self.log.info('All done! Download manager quitting...')
        # finally, exit the process.
        exit(0)
Example #21
    def start(self):
        print("started")

        def producer(target_queues, data_loader):
            num_queues = len(target_queues)
            ctr = 0
            for item in data_loader:
                q = ctr % num_queues
                target_queues[q].put(item)
                ctr += 1
            [target_queue.put("end") for target_queue in target_queues]

        def transformer(target_queue, source_queue, transform, seed):
            np.random.seed(seed)
            item = source_queue.get()
            while item != "end":
                target_queue.put(transform(**item))
                item = source_queue.get()
            target_queue.put("end")

        def joiner(transformed_queues, ready_queues, join_method, batch_size):
            # collects and joins samples to batches
            stop = False
            num_queues = len(transformed_queues)
            ctr = 0
            num_rdy = len(ready_queues)
            rdy_ctr = 0
            while not stop:
                items = []
                for _ in range(batch_size):
                    q = ctr % num_queues
                    item = transformed_queues[q].get()
                    ctr += 1
                    if item == "end":
                        stop = True
                        break
                    items.append(item)
                if stop:
                    break
                else:
                    joined = join_method(items)
                    rdy_q = rdy_ctr % num_rdy
                    ready_queues[rdy_q].put(joined)
                    rdy_ctr += 1
            [ready_queue.put("end") for ready_queue in ready_queues]

        self.sample_queues = [
            MPQueue(self.num_raw_cached) for i in range(self.num_processes)
        ]
        self.transformed_queues = [
            MPQueue(self.num_transformed_cached)
            for i in range(self.num_processes)
        ]
        self.ready_queues = [MPQueue(2) for i in range(2)]

        self.sample_generating_process = ProcessTerminateOnJoin(
            target=producer, args=(self.sample_queues, self.dataloader))
        self.sample_generating_process.daemon = True
        self.sample_generating_process.start()

        self.joining_process = ProcessTerminateOnJoin(
            target=joiner,
            args=(self.transformed_queues, self.ready_queues,
                  self.batch_joiner, self.batch_size))
        self.joining_process.daemon = True
        self.joining_process.start()

        self.q_ctr = 0

        self.transformers = []
        for i in range(self.num_processes):
            p = ProcessTerminateOnJoin(target=transformer,
                                       args=(self.transformed_queues[i],
                                             self.sample_queues[i],
                                             self.transform, self.seeds[i]))
            p.daemon = True
            p.start()
            self.transformers.append(p)

        self.was_started = True
Example #22
    def __init__(self):
        # this Queue is used for inter-process communication
        self.queue = MPQueue()
Example #23
    def start(self):
        self.mpQueue = MPQueue()
        self.process = Process(target=self.processMain)
        self.process.start()
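Note that target=self.processMain only works when the whole instance can be inherited (fork) or pickled (spawn) by the child. Under spawn, a module-level function taking the queue explicitly is the safer shape; a sketch with hypothetical names:

def process_main(mp_queue):
    mp_queue.put('ready')

# inside start():
#     self.process = Process(target=process_main, args=(self.mpQueue,))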
Example #24
from udplistner import Receiver as UDPListner
from archive import Archive


if __name__ == '__main__':

    logger = logging.getLogger('Log')
    logger.setLevel(logging.DEBUG)
    formatter = '%(asctime)s %(module)s(%(lineno)s):%(funcName)s [%(levelname)s]: %(message)s'
    streamhandler = logging.StreamHandler()
    streamhandler.setFormatter(logging.Formatter(formatter, datefmt='%H:%M:%S'))
    streamhandler.setLevel(logging.DEBUG)
    logger.addHandler(streamhandler)
    logger.debug('Session start')

    mpqueue = MPQueue()  # queue for multiprocessing
    threadqueue = ThreadQueue()  # queue for threading
    aqueue = MPQueue()  # for archiver

    ws = WSServer(port=Constants.wsport)
    ws.start()

    wd = WebServer(name='Flask')
    wd.start()

    serialReceiver: Dict[str, dict] = {
        'sGPS': {
            'port': '/dev/ttyACM0',
            'baud': 9600,
        },
        'sAIS': {
Example #25
    def _create_queues(self):
        return {
            CoreID(core): MPQueue()
            for core in range(self.config.workers_count + 1)
        }
Example #26
    def __init__(self):
        logger.info('CUEMS ENGINE INITIALIZATION')
        # Main thread ids
        logger.info(f'Main thread PID: {os.getpid()}')

        try:
            logger.info(f'Hardware discovery launched...')
            hw_discovery()
        except Exception as e:
            logger.exception(f'Exception: {e}')
            exit(-1)

        # Running flag
        self.stop_requested = False

        self._editor_request_uuid = None

        #########################################################
        # System signals handlers
        signal.signal(signal.SIGINT, self.sigIntHandler)
        signal.signal(signal.SIGTERM, self.sigTermHandler)
        signal.signal(signal.SIGUSR1, self.sigUsr1Handler)
        signal.signal(signal.SIGUSR2, self.sigUsr2Handler)
        signal.signal(signal.SIGCHLD, self.sigChldHandler)

        # Conf load manager
        try:
            self.cm = ConfigManager(path=CUEMS_CONF_PATH)
        except FileNotFoundError:
            logger.critical(
                'Node config file could not be found. Exiting !!!!!')
            exit(-1)

        # Our empty script object
        self.script = None
        '''
        CUE "POINTERS":
        here we use the "standard" point of view that there is an
        ongoing cue already running (one or many; at least the last one
        triggered) and a pointer indicating which cue goes next
        when GO is pressed
        '''
        self.ongoing_cue = None
        self.next_cue_pointer = None
        self.armedcues = list()

        # MTC master object creation through bound library and open port
        self.mtcmaster = libmtcmaster.MTCSender_create()
        self.go_offset = 0

        # MTC listener (could be useful)
        try:
            self.mtclistener = MtcListener(
                port=self.cm.node_conf['mtc_port'],
                step_callback=partial(CuemsEngine.mtc_step_callback, self),
                reset_callback=partial(CuemsEngine.mtc_step_callback, self,
                                       CTimecode('0:0:0:0')))
        except KeyError:
            logger.error(
                'mtc_port config could not be properly loaded. Exiting.')
            exit(-1)

        # WebSocket server
        settings_dict = {}
        settings_dict['session_uuid'] = str(uuid1())
        settings_dict['library_path'] = self.cm.library_path
        settings_dict['tmp_upload_path'] = self.cm.tmp_upload_path
        settings_dict['database_name'] = self.cm.database_name
        settings_dict['load_timeout'] = self.cm.node_conf['load_timeout']
        settings_dict['discovery_timeout'] = self.cm.node_conf[
            'discovery_timeout']
        self.engine_queue = MPQueue()
        self.editor_queue = MPQueue()
        self.ws_server = CuemsWsServer(self.engine_queue, self.editor_queue,
                                       settings_dict)
        try:
            self.ws_server.start(self.cm.node_conf['websocket_port'])
        except KeyError:
            self.stop_all_threads()
            logger.exception(
                'Config error, websocket_port key not found in settings. Exiting.'
            )
            exit(-1)
        except Exception as e:
            self.stop_all_threads()
            logger.error('Exception when starting websocket server. Exiting.')
            logger.exception(e)
            exit(-1)
        else:
            # Threaded own queue consumer loop
            self.engine_queue_loop = threading.Thread(
                target=self.engine_queue_consumer, name='engineq_consumer')
            self.engine_queue_loop.start()

        # OSSIA OSCQuery server
        self.ossia_queue = queue.Queue()
        self.ossia_server = OssiaServer(self.cm.node_conf['id'],
                                        self.cm.node_conf['oscquery_port'],
                                        self.cm.node_conf['oscquery_out_port'],
                                        self.ossia_queue)

        # Initial OSC nodes to tell ossia to configure
        OSC_ENGINE_CONF = {
            '/engine': [ossia.ValueType.Impulse, None],
            '/engine/command': [ossia.ValueType.Impulse, None],
            '/engine/command/load':
            [ossia.ValueType.String, self.load_project_callback],
            '/engine/command/loadcue':
            [ossia.ValueType.String, self.load_cue_callback],
            '/engine/command/go': [ossia.ValueType.Impulse, self.go_callback],
            '/engine/command/gocue':
            [ossia.ValueType.String, self.go_cue_callback],
            '/engine/command/pause':
            [ossia.ValueType.Impulse, self.pause_callback],
            '/engine/command/stop':
            [ossia.ValueType.Impulse, self.stop_callback],
            '/engine/command/resetall':
            [ossia.ValueType.Impulse, self.reset_all_callback],
            '/engine/command/preload':
            [ossia.ValueType.String, self.load_cue_callback],
            '/engine/command/unload':
            [ossia.ValueType.String, self.unload_cue_callback],
            '/engine/status/timecode': [ossia.ValueType.Int, None],
            '/engine/status/currentcue': [ossia.ValueType.String, None],
            '/engine/status/nextcue': [ossia.ValueType.String, None],
            '/engine/status/running': [ossia.ValueType.Int, None]
        }

        self.ossia_queue.put(QueueData('add', OSC_ENGINE_CONF))

        # Check, start and OSC register video devices/players
        self._video_players = {}
        try:
            self.check_video_devs()
        except Exception as e:
            logger.error(f'Error checking & starting video devices...')
            logger.exception(e)
            logger.error(f'Exiting...')
            exit(-1)

        # Everything is ready now and should be working, let's run!
        while not self.stop_requested:
            time.sleep(0.005)

        self.stop_all_threads()
Example #27
    def __init__(self, logger=None):
        in_queue = MPQueue()
        out_queue = MPQueue()

        super(ProcessCallRelay, self).__init__(logger, in_queue, out_queue)
Example #28
    empty = pool.join(timeout=10, raise_error=False)
    if not empty:
        logger.warning('{} join timeout. kill it'.format(worker_name))
        pool.kill()
    logger.warning('{} QUIT !!!'.format(worker_name))
    # return state
    return '{}: {}'.format(worker_name, str(state))


def call(q: MPQueue, func, *args):
    result = func(*args)
    q.put(result)
    q.close()


result_queue = MPQueue()

ps = []
for bind in binds:
    for w in range(workers):
        print('bind = {}, w = {}'.format(bind, w))
        p = Process(target=call, args=(result_queue, run, bind, w))
        p.start()
        ps.append(p)

states = []
while len(states) != len(ps):
    state = result_queue.get()
    states.append(state)

for state in states:
Example #29
# Copyright (C) 2016  Red Hat, Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
All global queues.
"""

from multiprocessing import Queue as MPQueue

#: Input queue for the investigator thread(s)
INVESTIGATE_QUEUE = MPQueue()

# ROUTER_QUEUE = Queue()
# QUEUES = {
#     "ALL": [Queue(), Queue()],
#     "10.2.0.2": [Queue()],
# }
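One caveat for module-level queues like INVESTIGATE_QUEUE: they are shared only by children that inherit them via fork. Under the spawn start method each child re-imports this module and gets a fresh, unrelated queue, so the queue must then be passed explicitly. A sketch (the module's import path is assumed):

from multiprocessing import Process
from queues import INVESTIGATE_QUEUE

def investigate(queue):
    queue.put('finding')

Process(target=investigate, args=(INVESTIGATE_QUEUE,)).start()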
Example #30
    def __init__(self, maxsize=64, multiprocess=False):
        self.q = (MPQueue(maxsize=maxsize) if multiprocess
                  else Queue(maxsize=maxsize))