Example 1
    def __init__(self, streamer):
        self.data_reader = streamer.data_reader
        self.num_workers = streamer.num_workers
        self.pin_memory = streamer.pin_memory and torch.cuda.is_available()
        self.timeout = streamer.timeout

        # Random base seed; worker i below is seeded with base_seed + i.
        base_seed = torch.LongTensor(1).random_().item()

        if self.num_workers > 0:
            self.worker_init_fn = streamer.worker_init_fn
            self.worker_result_queue = multiprocessing.Queue()
            self.batch_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.done_event = multiprocessing.Event()

            self.worker_done_events = [
                multiprocessing.Event() for _i in range(self.num_workers)
            ]
            self.workers = []
            for i in range(self.num_workers):
                w = multiprocessing.Process(
                    target=_worker_loop,
                    args=(
                        self.data_reader,
                        self.batch_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.worker_done_events[i],
                        base_seed + i,
                        self.worker_init_fn,
                        i,
                    ),
                )
                w.daemon = True  # ensure that the worker exits on process exit
                # Process.start() actually takes some time, as it needs to start
                # a process and pass the arguments over via a pipe. Therefore, we
                # only add a worker to the self.workers list after it has started,
                # so that if the program dies before the worker starts, __del__
                # does not try to join it and fail with:
                #     AssertionError: can only join a started process.
                w.start()
                self.workers.append(w)

            self.num_live_workers = self.num_workers

            if self.pin_memory:
                # A background thread copies batches from the worker result
                # queue into page-locked (pinned) host memory.
                self.data_queue = queue.Queue()
                self.pin_memory_thread = threading.Thread(
                    target=_pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        self.done_event,
                        self.pin_memory,
                        torch.cuda.current_device(),
                    ),
                )
                self.pin_memory_thread.daemon = True
                self.pin_memory_thread.start()
            else:
                self.data_queue = self.worker_result_queue

            # Register the worker pids and a SIGCHLD handler so that a worker
            # that dies unexpectedly is detected instead of hanging the parent.
            _set_worker_pids(id(self), tuple(w.pid for w in self.workers))
            _set_SIGCHLD_handler()
            self.worker_pids_set = True

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            # No workers
            self.data_reader_iter = iter(self.data_reader)
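
A minimal, self-contained sketch of the worker setup shown above, assuming a trivial worker body in place of the real _worker_loop: daemon processes read from a shared task queue, push results to a result queue, and the parent primes 2 * num_workers tasks before consuming, mirroring the prefetch loop. The names worker_loop, task_queue, and result_queue are illustrative only, and seeding, pin-memory, and signal handling are omitted.

import multiprocessing
import queue


def worker_loop(task_queue, result_queue, done_event, worker_id):
    # Each worker blocks on the shared task queue, handles one item at a
    # time, and pushes its result back to the parent. It exits once the
    # parent sets done_event (or when the parent dies, since it is a daemon).
    while not done_event.is_set():
        try:
            task = task_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        result_queue.put((worker_id, task * 2))  # placeholder "work"


if __name__ == "__main__":
    num_workers = 2
    task_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    done_event = multiprocessing.Event()

    workers = []
    for i in range(num_workers):
        w = multiprocessing.Process(
            target=worker_loop,
            args=(task_queue, result_queue, done_event, i),
        )
        w.daemon = True   # workers die with the parent process
        w.start()         # append only after a successful start
        workers.append(w)

    # Prime the pipeline with two tasks per worker, mirroring the
    # "2 * self.num_workers" prefetch loop above.
    for idx in range(2 * num_workers):
        task_queue.put(idx)

    for _ in range(2 * num_workers):
        print(result_queue.get())

    done_event.set()
    for w in workers:
        w.join(timeout=1)

As in the original, each process is appended to workers only after start() returns, so cleanup code never tries to join a process that was never started.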
Example 2
    def __init__(
        self,
        num_workers=1,
        pin_memory=False,
        pinned_device=torch.device('cuda', 0),
        worker_init_fn=None,
        multiprocessing_context=None,
        async_sleep=0.01,
    ):
        if num_workers < 1:
            raise ValueError('num_workers option should be positive')
        if multiprocessing_context is None:
            multiprocessing_context = multiprocessing

        self.num_workers = num_workers
        self._pin_memory = pin_memory
        self._pinned_device = pinned_device
        self._async_sleep = async_sleep

        self._worker_pids_set = False
        self._shutdown = False

        self._workers_done_event = multiprocessing_context.Event()

        self._task_id = 0
        self._worker_is_active = []
        self._task_queues = []
        self._worker_result_queues = []
        self._workers = []
        self._available_worker_queue = queue.Queue()  # indices of idle workers
        for i in range(self.num_workers):
            task_queue = multiprocessing_context.Queue()
            result_queue = multiprocessing_context.Queue()
            w = multiprocessing_context.Process(
                target=_worker_loop,
                args=(
                    task_queue,
                    result_queue,
                    self._workers_done_event,
                    worker_init_fn,
                    i,
                ),
            )
            w.daemon = True
        # NB: Process.start() actually takes some time, as it needs to
        #     start a process and pass the arguments over via a pipe.
        #     Therefore, we only add a worker to the self._workers list
        #     after it has started, so that if the program dies before
        #     the worker starts, __del__ does not try to join it and fail
        #     with:
        #     AssertionError: can only join a started process.
            w.start()
            self._task_queues.append(task_queue)
            self._worker_result_queues.append(result_queue)
            self._workers.append(w)
            self._worker_is_active.append(True)
            self._available_worker_queue.put(i)

        if self._pin_memory:
            # A background thread copies results into pinned host memory,
            # with one output queue per worker.
            self._pin_memory_thread_done_event = threading.Event()
            self._results_queues = [
                queue.Queue() for _ in range(self.num_workers)
            ]
            pin_memory_thread = threading.Thread(
                target=_pin_memory_loop,
                args=(
                    self._worker_result_queues,
                    self._results_queues,
                    self._pinned_device,
                    self._pin_memory_thread_done_event,
                    self._async_sleep,
                ),
            )
            pin_memory_thread.daemon = True
            pin_memory_thread.start()
            # Similar to workers (see comment above), we only register
            # pin_memory_thread once it is started.
            self._pin_memory_thread = pin_memory_thread
        else:
            self._results_queues = self._worker_result_queues

        signal_handling._set_worker_pids(id(self),
                                         tuple(w.pid for w in self._workers))
        signal_handling._set_SIGCHLD_handler()
        self._worker_pids_set = True

        self.__multiprocessing_context = multiprocessing_context
        self.__initialized = True
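
Example 2 differs in that every worker gets its own task queue and result queue, and a thread-safe queue.Queue of worker indices keeps track of which workers are idle. Below is a minimal sketch of that per-worker dispatch pattern under the same caveat: echo_worker and available are illustrative names, not functions or attributes from the code above, and pin-memory handling is omitted.

import multiprocessing
import queue


def echo_worker(task_queue, result_queue, done_event, worker_id):
    # Trivial worker: read a task from its own queue and echo it back,
    # tagged with the worker index.
    while not done_event.is_set():
        try:
            task = task_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        result_queue.put((worker_id, task))


if __name__ == "__main__":
    num_workers = 2
    done_event = multiprocessing.Event()
    task_queues = [multiprocessing.Queue() for _ in range(num_workers)]
    result_queues = [multiprocessing.Queue() for _ in range(num_workers)]
    available = queue.Queue()  # indices of idle workers

    workers = []
    for i in range(num_workers):
        w = multiprocessing.Process(
            target=echo_worker,
            args=(task_queues[i], result_queues[i], done_event, i),
        )
        w.daemon = True
        w.start()
        workers.append(w)
        available.put(i)  # every worker starts out idle

    # Dispatch: take an idle worker and hand it a task via its own queue.
    worker_id = available.get()
    task_queues[worker_id].put("some task")

    # Collect: read from that worker's result queue, then mark it idle again.
    print(result_queues[worker_id].get())
    available.put(worker_id)

    done_event.set()
    for w in workers:
        w.join(timeout=1)

Dispatching a task means popping an idle worker index, writing to that worker's task queue, and pushing the index back once its result has been read, which is the bookkeeping the _available_worker_queue above enables.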