def preload(self):
    """Fetch the next batch from the loader and stage it for consumption.

    On exhaustion of the underlying iterator, ``self.next_data`` is cleared
    and StopIteration is propagated to the caller. When CUDA is available,
    the freshly fetched batch is copied host->device asynchronously on the
    side stream so the transfer overlaps with ongoing compute.
    """
    try:
        batch = next(self.loader)
    except StopIteration:
        # Leave the object in a consistent "exhausted" state before propagating.
        self.next_data = None
        raise
    self.next_data = TensorTuple(batch)
    if not self._cuda_available:
        return
    with torch.cuda.stream(self.stream):
        self.next_data = self.next_data.to(device="cuda", non_blocking=True)
def data_preprocess(self, data: Tuple[Tensor, ...]) -> (Tuple[Tensor, ...], int):
    """Merge a paired batch along dim 0 and move it to the target device.

    Returns the device-resident tensors together with the batch size
    (size of dim 0 of the first tensor after merging).
    """
    if isinstance(data, tuple):
        first, second = data[0], data[1]
        # Stack the two halves of the pair along the batch dimension.
        # NOTE(review): assumes each half is an (input, target) pair — confirm
        # against the collate function used by the caller.
        inputs = cat((first[0], second[0]), dim=0)
        targets = cat((first[1], second[1]), dim=0)
        data = (inputs, targets)
    moved = TensorTuple(data).to(self.device, non_blocking=self._cuda_nonblocking)
    return moved, data[0].size(0)
class DataPrefetcher(object):
    """Wrap a DataLoader and prefetch batches onto the GPU on a side CUDA stream.

    While the consumer works on batch ``i``, batch ``i + 1`` is already being
    copied host->device with ``non_blocking=True`` on a dedicated stream,
    hiding transfer latency. Falls back to plain synchronous iteration when
    CUDA is unavailable.
    """

    def __init__(self, loader: DataLoader):
        self._cuda_available = torch.cuda.is_available()
        self._length = len(loader)
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream() if self._cuda_available else None
        self.next_data = None
        # Prime the pipeline with the first batch. An empty loader simply
        # leaves next_data as None; it no longer raises out of __init__.
        self.preload()

    def preload(self):
        """Fetch the next batch; ``self.next_data`` becomes None on exhaustion.

        BUGFIX: the previous version raised StopIteration from here, which
        (a) made constructing a prefetcher over an empty loader raise out of
        ``__init__``, and (b) caused ``__next__`` to drop the final batch of
        every epoch, because the exception escaped from the trailing
        ``preload()`` call before the already-fetched batch was returned.
        """
        try:
            self.next_data = TensorTuple(next(self.loader))
        except StopIteration:
            self.next_data = None
            return
        if self._cuda_available:
            # Asynchronous copy on the side stream overlaps with compute
            # running on the default stream.
            with torch.cuda.stream(self.stream):
                self.next_data = self.next_data.to(device="cuda", non_blocking=True)

    def __len__(self):
        return self._length

    def __iter__(self):
        return self

    def __next__(self):
        if self.next_data is None:
            # Loader exhausted (or empty from the start).
            raise StopIteration
        if self._cuda_available:
            # Make the default stream wait for the in-flight async copy
            # before handing the batch to the consumer.
            torch.cuda.current_stream().wait_stream(self.stream)
        data = self.next_data
        self.preload()
        return data
def test_tensortuple():
    """TensorTuple keeps element dtypes and converts them via ``.to``."""
    pair = (torch.randn(3, 3), torch.randn(3, 3))
    tt = TensorTuple(pair)
    assert tt[0].dtype == torch.float32
    converted = tt.to(torch.int32)
    assert converted[0].dtype == torch.int32
def data_preprocess(self, data: Tuple[Tensor, ...]) -> (Tuple[Tensor, ...], int):
    """Flatten a nested batch layout and move it to the target device.

    Returns the device-resident tensors and the batch size (dim 0 of the
    first tensor).
    """
    if isinstance(data[0], list):
        # Flatten: every tensor of the first element, then the tensors of
        # data[1][0], followed by the trailing data[1][1] entry.
        flattened = list(data[0])
        flattened.extend(data[1][0])
        flattened.append(data[1][1])
        data = tuple(flattened)
    moved = TensorTuple(data).to(self.device, non_blocking=self._cuda_nonblocking)
    return moved, data[0].size(0)
def data_preprocess(self, data: Tuple[Tensor, ...]) -> (Tuple[Tensor, ...], int):
    """Unroll the incoming batch, move it to the device, and report batch size."""
    unrolled = unroll(data)
    batch_size = unrolled[0].size(0)
    on_device = TensorTuple(unrolled).to(self.device, non_blocking=self._cuda_nonblocking)
    return on_device, batch_size