Example No. 1
def test_MultiWebLoader_torch_pipe():
    import torch

    def f(source):
        # multi_pipe hook: checks that every sample coming out of the
        # loader is an (image, class) pair before yielding it
        for sample in source:
            assert isinstance(sample, (list, tuple))
            assert len(sample) == 2
            yield sample

    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               90,
                               multi_pipe=f,
                               fields="ppm;jpg;jpeg;png cls".split(),
                               batch_size=32,
                               converters=loader.totorch())
    for sample in wl:
        break  # inspect only the first batch
    assert len(sample) == 2
    assert isinstance(sample[0], torch.Tensor), sample[0]
    assert isinstance(sample[1], torch.Tensor), sample[1]
    assert sample[0].dtype == torch.float32, sample[0]
    assert sample[1].dtype == torch.int64, sample[1]
    assert len(sample[0].shape) == 4, sample[0].shape
    assert len(sample[1].shape) == 1, sample[1].shape
    assert sample[0].shape[0] == 32, sample[0].shape
    assert sample[1].shape[0] == 32, sample[1].shape
    assert sample[0].shape[1] == 3, sample[0].size()
Example No. 2
def FIXME_test_MultiWebLoader_torch_sync_gpu2():
    # transferring torch tensors between worker and parent processes does not
    # currently work; use converters="torch_np" and then convert in the
    # parent process (see the sketch after this example)
    import torch
    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               90,
                               multi_pipe="sync_gpu_transfer",
                               fields="png cls".split(),
                               use_torch_mp=True,
                               converters="torch",
                               batch_size=32)
    for sample in wl:
        break
    assert len(sample) == 2
    assert isinstance(sample[0], torch.Tensor), sample[0]
    assert isinstance(sample[1], torch.Tensor), sample[1]
    assert sample[0].dtype == torch.float32, sample[0]
    assert sample[1].dtype == torch.int64, sample[1]
    assert len(sample[0].shape) == 4, sample[0].shape
    assert len(sample[1].shape) == 1, sample[1].shape
    assert sample[0].shape[0] == 32, sample[0].shape
    assert sample[1].shape[0] == 32, sample[1].shape
    assert sample[0].shape[1] == 3, sample[0].size()
    assert sample[0].device.type == "cuda", sample[0].device
    assert sample[1].device.type == "cuda", sample[1].device
    wl.terminate()
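
The FIXME comment above points at a workaround: request NumPy batches from the workers (converters="torch_np") and move them onto the GPU in the parent process. Below is a minimal sketch of that pattern; it assumes the same loader module and test data as the surrounding examples, that "torch_np" yields already-batched NumPy arrays, and the helper to_cuda_batch is illustrative rather than part of the library.

import torch

def to_cuda_batch(sample, device="cuda"):
    # illustrative helper (not part of the library): convert the
    # (image, class) NumPy batch produced by the workers into CUDA tensors
    images, classes = sample
    images = torch.as_tensor(images, dtype=torch.float32, device=device)
    classes = torch.as_tensor(classes, dtype=torch.int64, device=device)
    return images, classes

wl = loader.MultiWebLoader("testdata/sample.tgz",
                           90,
                           fields="png cls".split(),
                           converters="torch_np",
                           batch_size=32)
for sample in wl:
    images, classes = to_cuda_batch(sample)
    break
wl.terminate()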
Example No. 3
def test_MultiWebLoader_torch_1():
    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               2,
                               multi_pipe="sync_gpu_transfer",
                               fields="png cls".split(),
                               batch_size=32)
    total = sum(len(sample[0]) for sample in wl)
    assert total == 32 * 2, total
    wl.terminate()
Example No. 4
def test_MultiWebLoader_batching_epochs1():
    nbatches = 3
    bs = 8
    wl = loader.MultiWebLoader(["testdata/imagenet-000000.tgz"] * 4,
                               nbatches,
                               fields="__key__ ppm;jpg;jpeg;png cls".split(),
                               batch_size=bs,
                               tensor_batches=False,
                               processes=4)
    total = count_samples(wl)
    assert total == nbatches * bs, total
Example No. 5
def test_MultiWebLoader_batching_split():
    repeats = 4
    wl = loader.MultiWebLoader(["testdata/imagenet-000000.tgz"] * repeats,
                               2000,
                               split=True,
                               epochs=1,
                               fields="__key__ ppm;jpg;jpeg;png cls".split(),
                               tensor_batches=False,
                               processes=4)
    total = count_samples(wl, verbose=1, max=1000, batched=False)
    assert total == 47 * repeats, total
Example No. 6
def test_MultiWebLoader_torch():
    import torch
    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               90,
                               fields="png cls".split(),
                               batch_size=32,
                               converters=loader.totorch())
    for sample in wl:
        break
    assert len(sample) == 2
    assert isinstance(sample[0], torch.Tensor), sample[0]
    assert isinstance(sample[1], torch.Tensor), sample[1]
    assert sample[0].dtype == torch.float32, sample[0]
    assert sample[1].dtype == torch.int64, sample[1]
    assert len(sample[0].shape) == 4, sample[0].shape
    assert len(sample[1].shape) == 1, sample[1].shape
    assert sample[0].shape[0] == 32, sample[0].shape
    assert sample[1].shape[0] == 32, sample[1].shape
    assert sample[0].shape[1] == 3, sample[0].size()
Example No. 7
def test_MultiWebLoader_torch_sync_gpu():
    import torch
    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               90,
                               multi_pipe="sync_gpu_transfer",
                               fields="png cls".split(),
                               converters="torch_np",
                               batch_size=32)
    for sample in wl:
        break
    assert len(sample) == 2
    assert isinstance(sample[0], torch.Tensor), sample[0]
    assert isinstance(sample[1], torch.Tensor), sample[1]
    assert sample[0].dtype == torch.float32, sample[0]
    assert sample[1].dtype == torch.int64, sample[1]
    assert len(sample[0].shape) == 4, sample[0].shape
    assert len(sample[1].shape) == 1, sample[1].shape
    assert sample[0].shape[0] == 32, sample[0].shape
    assert sample[1].shape[0] == 32, sample[1].shape
    assert sample[0].shape[1] == 3, sample[0].size()
    assert sample[0].device.type == "cuda", sample[0].device
    assert sample[1].device.type == "cuda", sample[1].device
    wl.terminate()
Example No. 8
def FIXME_test_MultiWebLoader_torch_gpu():
    import torch
    wl = loader.MultiWebLoader("testdata/sample.tgz",
                               90,
                               fields="png cls".split(),
                               use_torch_mp=True,
                               batch_size=32,
                               converters="torch_cuda")
    for sample in wl:
        break
    assert len(sample) == 2
    assert isinstance(sample[0], torch.Tensor), sample[0]
    assert isinstance(sample[1], torch.Tensor), sample[1]
    assert sample[0].dtype == torch.float32, sample[0]
    assert sample[1].dtype == torch.int64, sample[1]
    assert len(sample[0].shape) == 4, sample[0].shape
    assert len(sample[1].shape) == 1, sample[1].shape
    assert sample[0].shape[0] == 32, sample[0].shape
    assert sample[1].shape[0] == 32, sample[1].shape
    assert sample[0].shape[1] == 3, sample[0].size()
    assert sample[0].device.type == "cuda", sample[0].device
    assert sample[1].device.type == "cuda", sample[1].device
    wl.terminate()