Example no. 1
    def setup(self):
        # build the full training set, split it into train/val, then load the test set
        mnist_full = TrialMNIST(root=self.data_dir,
                                train=True,
                                num_samples=64,
                                download=True)
        self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
        self.dims = tuple(self.mnist_train[0][0].shape)
        self.mnist_test = TrialMNIST(root=self.data_dir,
                                     train=False,
                                     num_samples=32,
                                     download=True)
Example no. 2
    def setup(self, stage: Optional[str] = None):

        mnist_full = TrialMNIST(
            root=self.data_dir, train=True, num_samples=64, download=True
        )
        self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
        self.dims = self.mnist_train[0][0].shape
Example no. 3
    def setup(self, stage: Optional[str] = None):

        if stage == "fit" or stage is None:
            mnist_full = TrialMNIST(
                root=self.data_dir, train=True, num_samples=64, download=True
            )
            self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
            self.dims = self.mnist_train[0][0].shape

        if stage == "test" or stage is None:
            self.mnist_test = TrialMNIST(
                root=self.data_dir, train=False, num_samples=64, download=True
            )
            self.dims = getattr(self, "dims", self.mnist_test[0][0].shape)

        self.non_picklable = lambda x: x ** 2
Example no. 4
    def dataloader(self, train):
        dataset = TrialMNIST(root=self.hparams.data_root,
                             train=train,
                             download=True)

        loader = DataLoader(dataset=dataset,
                            batch_size=self.hparams.batch_size,
                            shuffle=True)
        return loader
Example no. 5
    def dataloader(self, train):
        dataset = TrialMNIST(root=self.data_root, train=train, download=True)

        loader = DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            num_workers=3,
            shuffle=train,
        )
        return loader
Example no. 6
    def dataloader(self, train):
        dataset = TrialMNIST(root=self.hparams.data_root, train=train, download=True)

        loader = DataLoader(
            dataset=dataset,
            batch_size=self.hparams.batch_size,
            # test and valid shall not be shuffled
            shuffle=train,
        )
        return loader
Example no. 7
    def dataloader(self, train: bool, num_samples: int = 100):
        dataset = TrialMNIST(root=self.data_root, train=train, num_samples=num_samples, download=True)

        loader = DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            num_workers=0,
            shuffle=train,
        )
        return loader
Example no. 8
    def _dataloader(self, train):
        # init data generators
        dataset = TrialMNIST(root=self.hparams.data_root, train=train, download=True)

        # when using multi-node we need to add the datasampler
        batch_size = self.hparams.batch_size

        loader = DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=True
        )

        return loader
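Examples 4 through 8 only define a shared helper; in the test templates these snippets come from, the Lightning dataloader hooks usually just delegate to it. The following is a minimal sketch of that wiring, assuming a helper named dataloader(train) as in the examples above, not code taken from any of them:

    # Sketch: standard Lightning hooks delegating to the shared helper defined above.
    def train_dataloader(self):
        return self.dataloader(train=True)

    def val_dataloader(self):
        # with helpers that pass shuffle=train, validation and test data are not shuffled
        return self.dataloader(train=False)

    def test_dataloader(self):
        return self.dataloader(train=False)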
Example no. 9
    def prepare_data(self):
        TrialMNIST(root=self.data_root, train=True, download=True)
Example no. 10
    def prepare_data(self):
        TrialMNIST(self.data_dir, train=True, download=True)
        TrialMNIST(self.data_dir, train=False, download=True)
Example no. 11
    def train_dataloader(self):
        return DataLoader(TrialMNIST(train=True, download=True), batch_size=16)
Example no. 12
import pytest

from torch.utils.data import DataLoader

from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.accelerators import TPUAccelerator
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities import TPU_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
from tests.base.datasets import TrialMNIST
from tests.base.develop_utils import pl_multi_process_test

if TPU_AVAILABLE:
    import torch_xla
    import torch_xla.distributed.xla_multiprocessing as xmp
    SERIAL_EXEC = xmp.MpSerialExecutor()

_LARGER_DATASET = TrialMNIST(download=True,
                             num_samples=2000,
                             digits=(0, 1, 2, 5, 8))


# 8 cores needs a big dataset
def _serial_train_loader():
    return DataLoader(_LARGER_DATASET, batch_size=32)


@pytest.mark.skipif(not TPU_AVAILABLE, reason="test requires TPU machine")
@pl_multi_process_test
def test_model_tpu_cores_1(tmpdir):
    """Make sure model trains on TPU."""
    trainer_options = dict(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
Example no. 13
    def train_dataloader(self):
        return DataLoader(TrialMNIST(train=True,
                                     download=True,
                                     num_samples=500,
                                     digits=list(range(5))),
                          batch_size=128)
Example no. 14
    def long_train_loader():
        loader = DataLoader(TrialMNIST(download=True, num_samples=15000, digits=(0, 1, 2, 5, 8)), batch_size=32)
        return loader
Example no. 15
    def test_dataloader(self):
        return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)
Example no. 16
    def train_dataloader(self):
        return DataLoader(TrialMNIST(train=True, num_samples=100), batch_size=16)
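Taken together, the snippets above cover the full set of Lightning data hooks. As a rough, non-authoritative sketch of how they combine, the following LightningDataModule wires prepare_data, setup and the dataloader hooks around TrialMNIST; the class name and the data_dir and batch_size arguments are illustrative assumptions, only the TrialMNIST calls and the 128/64 split are taken from the examples above.

# Illustrative sketch only: class name, data_dir and batch_size are assumed, not from the examples above.
import os
from typing import Optional

from torch.utils.data import DataLoader, random_split

from pytorch_lightning import LightningDataModule
from tests.base.datasets import TrialMNIST


class TrialMNISTDataModule(LightningDataModule):
    def __init__(self, data_dir: str = os.getcwd(), batch_size: int = 16):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size

    def prepare_data(self):
        # download once; prepare_data should not assign state
        TrialMNIST(self.data_dir, train=True, download=True)
        TrialMNIST(self.data_dir, train=False, download=True)

    def setup(self, stage: Optional[str] = None):
        if stage == "fit" or stage is None:
            mnist_full = TrialMNIST(root=self.data_dir, train=True, num_samples=64, download=True)
            self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
        if stage == "test" or stage is None:
            self.mnist_test = TrialMNIST(root=self.data_dir, train=False, num_samples=64, download=True)

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=self.batch_size)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.batch_size)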