def _start(self):
        if len(self._processes) == 0:
            self.abort_event.clear()

            logging.debug("starting workers")
            self._queue_loop = 0
            self._end_ctr = 0

            # force the generator to re-initialize itself inside each worker process
            if hasattr(self.generator, 'was_initialized'):
                self.generator.was_initialized = False

            # limit BLAS to a single thread so the worker processes do not oversubscribe the CPU
            with threadpool_limits(limits=1, user_api="blas"):
                for i in range(self.num_processes):
                    self._queues.append(Queue(self.num_cached_per_queue))
                    self._processes.append(Process(target=producer, args=(
                        self._queues[i], self.generator, self.transform, i, self.seeds[i], self.abort_event)))
                    # daemon workers are killed automatically if the parent dies
                    self._processes[-1].daemon = True
                    self._processes[-1].start()

            if self.pin_memory:
                # torch is imported lazily so it stays an optional dependency
                import torch
                self.pin_memory_queue = thrQueue(2)
                self.pin_memory_thread = threading.Thread(target=pin_memory_loop, args=(
                    self._queues, self.pin_memory_queue, self.abort_event, torch.cuda.current_device()))
                self.pin_memory_thread.daemon = True
                self.pin_memory_thread.start()
        else:
            logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running")
    def _start(self):
        # (re)start only when the worker pool is missing or incomplete; clean up leftovers first
        if len(self._processes) != self.num_processes:
            self._finish()
            self.abort_event.clear()

            logging.debug("starting workers")
            self._queue_ctr = 0
            self._end_ctr = 0

            if hasattr(self.generator, 'was_initialized'):
                self.generator.was_initialized = False

            with threadpool_limits(limits=1, user_api="blas"):
                for i in range(self.num_processes):
                    self._queues.append(Queue(self.num_cached_per_queue))
                    self._processes.append(Process(target=producer, args=(
                        self._queues[i], self.generator, self.transform, i, self.seeds[i], self.abort_event)))
                    self._processes[-1].daemon = True
                    self._processes[-1].start()

            # memory pinning needs a CUDA device; fall back gracefully if torch or CUDA is absent
            if torch is not None and torch.cuda.is_available():
                gpu = torch.cuda.current_device()
            else:
                gpu = None

            # more caching = more performance. But don't cache too much or your RAM will hate you
            self.pin_memory_queue = thrQueue(max(3, self.num_cached_per_queue * self.num_processes // 2))

            self.pin_memory_thread = threading.Thread(target=results_loop, args=(
                self._queues, self.pin_memory_queue, self.abort_event, self.pin_memory, gpu, self.wait_time))

            self.pin_memory_thread.daemon = True
            self.pin_memory_thread.start()
        else:
            logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running")
    def _start(self):
        if len(self._threads) == 0:
            logging.debug("starting workers")
            self._queue_loop = 0
            self._end_ctr = 0

            # note: despite the name, self._threads holds multiprocessing Processes
            for i in range(self.num_processes):
                self._queues.append(Queue(self.num_cached_per_queue))
                self._threads.append(
                    Process(target=producer,
                            args=(self._queues[i], self.generator,
                                  self.transform, i, self.seeds[i])))
                self._threads[-1].daemon = True
                self._threads[-1].start()

            if self.pin_memory:
                self.pin_memory_queue = thrQueue(2)
                self.pin_memory_thread = threading.Thread(
                    target=pin_memory_loop,
                    args=(self._queues, self.pin_memory_queue))
                self.pin_memory_thread.daemon = True
                self.pin_memory_thread.start()
        else:
            logging.debug(
                "MultiThreadedGenerator Warning: start() has been called but workers are already running"
            )
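For context on what the pin-memory thread buys you: CUDA can only overlap host-to-device copies with compute when the source lives in pinned (page-locked) host memory. A standalone PyTorch illustration, independent of the snippets above:

import torch

batch = torch.randn(32, 3, 224, 224)  # ordinary pageable host memory
pinned = batch.pin_memory()           # copy into page-locked memory
# non_blocking=True only actually overlaps when the source is pinned
batch_gpu = pinned.to('cuda', non_blocking=True)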
    def _start(self):
        if not self.initialized:
            # tear down any leftover workers before starting fresh
            self._finish()

            # this variant uses one queue shared by all workers instead of one queue per worker
            self._queue = Queue(self.num_cached)
            self.results_loop_queue = thrQueue(self.num_cached)
            self.abort_event = Event()

            logging.debug("starting workers")
            if isinstance(self.generator, DataLoader):
                self.generator.was_initialized = False

            with threadpool_limits(limits=1, user_api=None):
                for i in range(self.num_processes):
                    self._processes.append(
                        Process(target=producer,
                                args=(self._queue, self.generator,
                                      self.transform, i, self.seeds[i],
                                      self.abort_event, self.wait_time)))
                    self._processes[-1].daemon = True
                # start the workers while the threadpool limit is still in effect
                _ = [i.start() for i in self._processes]

            if torch is not None and torch.cuda.is_available():
                gpu = torch.cuda.current_device()
            else:
                gpu = None

            # in_queue: Queue, out_queue: thrQueue, abort_event: Event, pin_memory: bool, worker_list: List[Process],
            # gpu: Union[int, None] = None, wait_time: float = 0.02
            self.results_loop_thread = threading.Thread(
                target=results_loop,
                args=(self._queue, self.results_loop_queue, self.abort_event,
                      self.pin_memory, self._processes, gpu, self.wait_time))
            self.results_loop_thread.daemon = True
            self.results_loop_thread.start()

            self.initialized = True
        else:
            logging.debug(
                "MultiThreadedGenerator Warning: start() has been called but workers are already running"
            )
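The `_finish` invoked at the top of the last two variants is not shown either. A plausible teardown counterpart, sketched against the attribute names used in the last variant (this is an assumption, not the library's implementation):

    def _finish(self):
        # hypothetical sketch: signal everything to stop, then reap workers
        if self.abort_event is not None:
            self.abort_event.set()
        if getattr(self, 'results_loop_thread', None) is not None:
            self.results_loop_thread.join(timeout=5)
        for p in self._processes:
            if p.is_alive():
                p.terminate()  # workers may be blocked on a full queue
            p.join()
        self._processes = []
        self._queue = None
        self.results_loop_queue = None
        self.initialized = False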