Example #1
    def __init__(self,
                 dataset: Dataset,
                 *,
                 transform: Transformation,
                 is_train: bool,
                 batch_size: int,
                 ctx: mx.Context,
                 dtype: DType = np.float32,
                 cyclic: bool = False,
                 num_workers: Optional[int] = None,
                 num_prefetch: Optional[int] = None,
                 **kwargs) -> None:
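        # Store the batching configuration on the instance.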
        self.batch_size = batch_size
        self.ctx = ctx
        self.dtype = dtype
        self.is_train = is_train
        self.transform = transform
        self.cyclic = cyclic

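        # Delegate the actual (possibly multi-process) batching to ParallelDataLoader.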
        self.parallel_data_loader = ParallelDataLoader(
            dataset=dataset,
            transformation=self.transform,
            cyclic=self.cyclic,
            is_train=self.is_train,
            batch_size=self.batch_size,
            ctx=ctx,
            dtype=self.dtype,
            num_workers=num_workers,
            num_prefetch=num_prefetch,
            **kwargs,
        )
Example #2
    def __init__(
        self,
        dataset: Dataset,
        *,
        transform: Transformation,
        cyclic: bool,
        is_train: bool,
        batch_size: int,
        ctx: mx.Context,
        dtype: DType = np.float32,
        num_workers: Optional[int] = None,
        num_prefetch: Optional[int] = None,
        shuffle_buffer_length: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.batch_size = batch_size
        self.ctx = ctx
        self.dtype = dtype
        self.is_train = is_train
        self.transform = transform
        self.cyclic = cyclic
        self.logger = logging.getLogger(__name__)
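        # Warn if more workers are requested than there are CPU cores available.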
        if num_workers is not None and num_workers > mp.cpu_count():
            self.logger.warning(
                f"num_workers is set to {num_workers}, but there are only {mp.cpu_count()} cpus "
                f"please reduce the number of workers"
            )
        self.num_workers = num_workers
        self.num_prefetch = num_prefetch
        self.shuffle_buffer_length = shuffle_buffer_length

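        # Delegate batching (and optional shuffle buffering) to ParallelDataLoader.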
        self.parallel_data_loader = ParallelDataLoader(
            dataset=dataset,
            transformation=self.transform,
            cyclic=self.cyclic,
            is_train=self.is_train,
            batch_size=self.batch_size,
            ctx=self.ctx,
            dtype=self.dtype,
            num_workers=self.num_workers,
            num_prefetch=self.num_prefetch,
            shuffle_buffer_length=self.shuffle_buffer_length,
            **kwargs,
        )
Example #3
File: loader.py  Project: slowjazz/gluon-ts
    def __init__(
        self,
        dataset: Dataset,
        *,
        transform: Transformation,
        cyclic: bool,
        is_train: bool,
        batch_size: int,
        ctx: mx.Context,
        dtype: DType = np.float32,
        num_workers: Optional[int] = None,
        num_prefetch: Optional[int] = None,
        shuffle_buffer_length: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.batch_size = batch_size
        self.ctx = ctx
        self.dtype = dtype
        self.is_train = is_train
        self.transform = transform
        self.cyclic = cyclic
        self.logger = logging.getLogger(__name__)
        self.num_workers = num_workers
        self.num_prefetch = num_prefetch
        self.shuffle_buffer_length = shuffle_buffer_length

        self.parallel_data_loader = ParallelDataLoader(
            dataset=dataset,
            transformation=self.transform,
            cyclic=self.cyclic,
            is_train=self.is_train,
            batch_size=self.batch_size,
            ctx=self.ctx,
            dtype=self.dtype,
            num_workers=self.num_workers,
            num_prefetch=self.num_prefetch,
            shuffle_buffer_length=self.shuffle_buffer_length,
            **kwargs,
        )
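
All three variants store their constructor arguments and hand the actual batching off to ParallelDataLoader; they differ only in whether cyclic and shuffle_buffer_length are exposed and whether the num_workers sanity check is performed. The sketch below shows how such a loader might be constructed, assuming the __init__ above belongs to the base DataLoader class in gluonts.dataset.loader of an MXNet-based GluonTS release; the import paths, the Identity transformation, and the toy dataset are illustrative and may differ between versions.

import mxnet as mx
import numpy as np

from gluonts.dataset.common import ListDataset
from gluonts.dataset.loader import DataLoader  # assumption: the class defining the __init__ above
from gluonts.transform import Identity

# Toy dataset: a single hourly series of 100 random values.
dataset = ListDataset(
    [{"start": "2020-01-01 00:00", "target": np.random.rand(100)}],
    freq="1H",
)

loader = DataLoader(
    dataset,
    transform=Identity(),  # no-op transformation, purely for illustration
    cyclic=True,           # iterate over the dataset repeatedly (training-style)
    is_train=True,
    batch_size=32,
    ctx=mx.cpu(),
    num_workers=None,      # load data in the main process
)

# Iterating the loader yields batches as dicts of field name -> mx.nd.NDArray placed
# on `ctx`; whether iteration is defined on this base class or only on its subclasses
# (e.g. TrainDataLoader) depends on the GluonTS version.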