Example no. 1
    model = BoringModel()
    dl_size = len(model.val_dataloader())
    # Pick a float small enough that limit_val_batches * dataset_size < 1,
    # i.e. the requested fraction rounds down to zero validation batches.
    limit_val_batches = 1 / (dl_size + 2)
    trainer = Trainer(limit_val_batches=limit_val_batches)
    trainer._data_connector.attach_data(model)
    with pytest.raises(
        MisconfigurationException,
        match=fr"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`",
    ):
        trainer._reset_eval_dataloader(RunningStage.VALIDATING, model)


@pytest.mark.parametrize(
    "val_dl",
    [
        DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
        CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)),
        CombinedLoader(
            [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]
        ),
        CombinedLoader(
            {
                "dl1": DataLoader(dataset=RandomDataset(32, 64)),
                "dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
            }
        ),
    ],
)
def test_non_sequential_sampler_warning_is_raised_for_eval_dataloader(val_dl):
    trainer = Trainer()
    model = BoringModel()
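Note (not part of the excerpt): every parametrized `val_dl` above contains at
least one DataLoader built with `shuffle=True`, i.e. a non-sequential
`RandomSampler`, which is exactly what the warning under test should flag for
evaluation dataloaders.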
Example no. 2
def train_dataloader(self):
    return torch.utils.data.DataLoader(RandomDataset(32, 64),
                                       collate_fn=collate_fn)
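The `collate_fn` used above is defined elsewhere in the original test. A
minimal sketch of what such a function might look like (the stacking behavior
is an assumption, not taken from the source):

import torch

def collate_fn(batch):
    # `batch` is a list of per-sample tensors of shape (32,);
    # stack them into a single (len(batch), 32) tensor.
    return torch.stack(batch)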
Example no. 3
def predict_dataloader(self):
    return [
        DataLoader(RandomDataset(32, 64)),
        DataLoader(RandomDataset(32, 64))
    ]
Example no. 4
def val_dataloader(self):
    return DataLoader(RandomDataset(32, 2000), batch_size=32)

def train_dataloader(self, *args, **kwargs) -> DataLoader:
    return DataLoader(RandomDataset(32, 64), batch_size=32)
Example no. 6
def train_dataloader(self):
    return DataLoader(RandomDataset(32, 64), batch_size=2)
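With a 64-sample dataset and `batch_size=2`, this loader yields 64 / 2 = 32
batches per epoch.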
Example no. 7
from pytorch_lightning.accelerators import TPUAccelerator
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.plugins import TPUSpawnPlugin
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.utilities import _TPU_AVAILABLE
from pytorch_lightning.utilities.distributed import ReduceOp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.utils import pl_multi_process_test

if _TPU_AVAILABLE:
    import torch_xla
    import torch_xla.distributed.xla_multiprocessing as xmp
    SERIAL_EXEC = xmp.MpSerialExecutor()

_LARGER_DATASET = RandomDataset(32, 2000)


# 8 cores needs a big dataset
def _serial_train_loader():
    return DataLoader(_LARGER_DATASET, batch_size=32)
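
A hedged usage sketch (assumed; not shown in the excerpt): `MpSerialExecutor.run`
executes the factory on one process at a time, so the large dataset is
materialized serially rather than by all eight TPU cores at once.

train_loader = SERIAL_EXEC.run(_serial_train_loader)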


class SerialLoaderBoringModel(BoringModel):

    def train_dataloader(self):
        return DataLoader(RandomDataset(32, 2000), batch_size=32)

    def val_dataloader(self):
        return DataLoader(RandomDataset(32, 2000), batch_size=32)


def test_dataloaders_with_missing_keyword_arguments():
    ds = RandomDataset(10, 20)

    class TestDataLoader(DataLoader):
        def __init__(self, dataset):
            # Accepts only `dataset`, dropping every other DataLoader
            # argument, so the loader cannot be rebuilt with a new sampler.
            super().__init__(dataset)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    match = escape(
        "missing arguments are ['batch_sampler', 'sampler', 'shuffle']")
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="fit")
    match = escape(
        "missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler', 'shuffle']"
    )
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="predict")

    class TestDataLoader(DataLoader):
        def __init__(self, dataset, *args, **kwargs):
            # Accepts (but ignores) extra arguments, so re-instantiation
            # with an injected sampler succeeds.
            super().__init__(dataset)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    _update_dataloader(loader, sampler, mode="fit")
    _update_dataloader(loader, sampler, mode="predict")

    class TestDataLoader(DataLoader):
        def __init__(self, *foo, **bar):
            # Forwards everything to DataLoader, so the injected sampler
            # is applied and no exception is raised.
            super().__init__(*foo, **bar)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    _update_dataloader(loader, sampler, mode="fit")
    _update_dataloader(loader, sampler, mode="predict")

    class TestDataLoader(DataLoader):
        def __init__(self, num_feat, dataset, *args, shuffle=False):
            # `shuffle` is accepted explicitly, but without `**kwargs` the
            # keyword-only `sampler`/`batch_sampler` cannot be forwarded.
            self.num_feat = num_feat
            super().__init__(dataset)

    loader = TestDataLoader(1, ds)
    sampler = SequentialSampler(ds)
    match = escape("missing arguments are ['batch_sampler', 'sampler']")
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="fit")
    match = escape(
        "missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler']"
    )
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="predict")

    class TestDataLoader(DataLoader):
        def __init__(self, num_feat, dataset, **kwargs):
            # Stored under a different name on purpose: the `num_feat`
            # argument cannot be recovered from the instance attributes.
            self.feat_num = num_feat
            super().__init__(dataset)

    loader = TestDataLoader(1, ds)
    sampler = SequentialSampler(ds)
    match = escape("missing attributes are ['num_feat']")
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="fit")
    match = escape("missing attributes are ['num_feat']")
    with pytest.raises(MisconfigurationException, match=match):
        _update_dataloader(loader, sampler, mode="predict")
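As the passing cases above suggest, a custom DataLoader subclass stays
re-instantiable when it forwards everything to the base class. A minimal
sketch (the class name is illustrative, not from the source):

class PassthroughDataLoader(DataLoader):
    def __init__(self, dataset, **kwargs):
        # Accept and forward all DataLoader kwargs so a new sampler
        # can be injected on re-instantiation.
        super().__init__(dataset, **kwargs)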


def test_pre_made_batches():
    """Check that loader works with pre-made batches."""
    # batch_size=None disables automatic batching: each item the dataset
    # yields is passed through as a ready-made batch.
    loader = DataLoader(RandomDataset(32, 10), batch_size=None)
    trainer = Trainer(fast_dev_run=1)
    trainer.predict(LoaderTestModel(), loader)
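
For context, a sketch of a dataset that yields whole batches and therefore
suits `batch_size=None` (this class is an assumption, not the excerpt's
`LoaderTestModel`):

import torch
from torch.utils.data import Dataset

class PreBatchedDataset(Dataset):
    def __len__(self):
        return 10

    def __getitem__(self, idx):
        # Each item is already a complete (batch, features) tensor.
        return torch.randn(4, 32)
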
def train_dataloader(self):
    return DataLoader(RandomDataset(32, 64), num_workers=self.num_workers)

def test_dataloader(self):
    # `num_dataloaders` comes from the enclosing scope in the original test.
    return [
        torch.utils.data.DataLoader(RandomDataset(32, 64))
        for _ in range(num_dataloaders)
    ]

def test_dataloader(self):
    return [
        torch.utils.data.DataLoader(RandomDataset(32, 64)),
        torch.utils.data.DataLoader(RandomDataset(32, 64))
    ]
Example no. 13
def train_dataloader(self):
    # When exercising the StopIteration path, return twice as many batches
    # as the run is expected to process.
    if self.trigger_stop_iteration:
        return DataLoader(RandomDataset(BATCH_SIZE, 2 * EXPECT_NUM_BATCHES_PROCESSED))
    return DataLoader(RandomDataset(BATCH_SIZE, EXPECT_NUM_BATCHES_PROCESSED))
Example no. 14
def train_dataloader(self):
    return DataLoader(RandomDataset(BATCH_SIZE, DATASET_LEN))