Code Example #1
import time

import numpy
import torch

from draugr import batched_recycle, to_tensor_generator  # assumed top-level draugr exports, as in Example #6


def test_d1():
    channels_in = 3
    channels_out = 3

    samples = 4
    device = "cuda"
    batches = 10
    batch_size = 32
    data_shape = (batches * batch_size, channels_in, 512, 512)

    model = torch.nn.Sequential(
        torch.nn.Conv2d(channels_in, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
    ).to("cuda")

    for _ in range(samples):
        s1 = time.time()
        # pass 1: convert and copy each batch eagerly (preload_next=False)
        for _, a in zip(
                range(batches),
                to_tensor_generator(
                    batched_recycle(numpy.random.sample(data_shape),
                                    batch_size),
                    device=device,
                    preload_next=False,
                ),
        ):
            model(a)

        s2 = time.time()
        # pass 2: identical, but with preload_next left at its default
        for _, a in zip(
                range(batches),
                to_tensor_generator(
                    batched_recycle(numpy.random.sample(data_shape),
                                    batch_size),
                    device=device,
                ),
        ):
            model(a)

        s3 = time.time()

        print(f"preload_next=False: {s2 - s1}")
        print(f"preload_next default: {s3 - s2}")
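For context, here is a minimal sketch of the kind of background prefetching that preload_next enables: a worker thread converts and copies the next batch while the model runs on the current one. Names and structure are hypothetical, not draugr's actual implementation.

import queue
import threading

import torch


def prefetching_tensor_generator(numpy_batches, device="cuda", max_prefetch=1):
    """Yield a tensor per numpy batch, converting and copying on a worker
    thread so batch N+1's transfer overlaps with work on batch N."""
    q = queue.Queue(maxsize=max_prefetch)
    sentinel = object()

    def worker():
        for batch in numpy_batches:
            t = torch.as_tensor(batch, dtype=torch.float)
            if device != "cpu":
                # pinned memory allows an asynchronous host-to-device copy
                t = t.pin_memory().to(device, non_blocking=True)
            q.put(t)
        q.put(sentinel)

    threading.Thread(target=worker, daemon=True).start()
    while True:
        item = q.get()
        if item is sentinel:
            break
        yield item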
Code Example #2
import time

import torch
from torch.utils.data import Dataset

from draugr import batched_recycle, to_tensor_generator  # assumed top-level draugr exports, as in Example #6


def test_d7():
    import numpy

    channels_in = 3

    samples = 10
    device = "cuda"
    batches = 3
    batch_size = 32
    data_shape = (batches * batch_size, 256, 256, channels_in)
    batch_shape = torch.Size([batch_size, 256, 256, channels_in])
    dtype = torch.float

    class RandomDataset(Dataset):
        """ """
        def __init__(self):
            self.d = numpy.random.sample(data_shape)

        def __len__(self):
            return len(self.d)

        def __getitem__(self, item):
            return self.d[item]

    dataloader = torch.utils.data.DataLoader(
        RandomDataset(),
        batch_size=batch_size,
        shuffle=True,
        num_workers=1,
        pin_memory=True,
    )

    # preloading generator: converts batches to float32 tensors on `device`,
    # prefetching the next batch while the current one is consumed
    generator = to_tensor_generator(
        batched_recycle(numpy.random.sample(data_shape), batch_size),
        device=device,
        preload_next=True,
        dtype=dtype,
    )

    for _ in range(samples):
        s1 = time.time()
        # generator path: batches arrive on `device` already converted
        for _, a in zip(range(batches), generator):
            assert batch_shape == a.shape, a.shape

        s2 = time.time()

        # DataLoader path: batches arrive on the CPU and are copied per step
        for _, a in zip(range(batches), dataloader):
            a = a.to(device, dtype=dtype)
            assert batch_shape == a.shape, a.shape
        s3 = time.time()

        print(f"generator: {s2 - s1}")
        print(f"dataloader: {s3 - s2}")
Code Example #3
import time

import numpy
import torch

from draugr import batched_recycle, to_tensor_generator  # assumed top-level draugr exports, as in Example #6


def test_d1():
    channels_in = 3
    channels_out = 3

    samples = 10
    device = "cuda"
    batches = 3
    batch_size = 32
    data_shape = (batches * batch_size, channels_in, 512, 512)

    model = torch.nn.Sequential(
        torch.nn.Conv2d(channels_in, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
    ).to(device)

    for _ in range(samples):
        s1 = time.time()
        # generator path, preloading disabled
        for _, a in zip(
                range(batches),
                to_tensor_generator(
                    batched_recycle(numpy.random.sample(data_shape),
                                    batch_size),
                    device=device,
                    preload_next=False,
                ),
        ):
            model(a)

        s2 = time.time()
        # DataLoader over the raw numpy array (arrays provide __len__/__getitem__)
        for _, a in zip(
                range(batches),
                torch.utils.data.DataLoader(
                    numpy.random.sample(data_shape),
                    batch_size=batch_size,
                    shuffle=True,
                    num_workers=1,
                    pin_memory=False,
                ),
        ):
            model(a.to(device, dtype=torch.float))

        s3 = time.time()

        print(f"generator: {s2 - s1}")
        print(f"dataloader: {s3 - s2}")
Code Example #4
from draugr import batched_recycle  # assumed top-level draugr export, as in Example #6


def test_batch_with_label():
    import numpy

    channels_in = 3
    batches = 3
    batch_size = 32
    data_shape = (batches * batch_size, 256, 256, channels_in)

    # pair each image with a scalar label, then batch the (sample, label) tuples
    generator = batched_recycle(
        zip(numpy.random.sample(data_shape),
            numpy.random.sample(data_shape[0])),
        batch_size,
    )

    for i, a in enumerate(generator):
        print(a)  # one batch of (sample, label) pairs
        break
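Assuming each yielded batch is a sequence of (sample, label) tuples, as the zip above suggests, the pairs can be split back into parallel tuples; a usage sketch, not part of the original test:

for batch in generator:
    samples, labels = zip(*batch)  # two length-batch_size tuples
    break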
Code Example #5
from draugr import batched_recycle  # assumed top-level draugr export, as in Example #6


def test_batch_generator1():
    a = range(9)
    batch_size = 3
    # draw 18 batches of 3 from an endlessly recycled 9-element range
    for i, b in zip(range(18), batched_recycle(a, batch_size)):
        assert all(b_ in a for b_ in b)  # all(), not a bare list, which is always truthy
    assert i == 17
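For reference, a behaviourally similar sketch of what batched_recycle appears to do, endlessly drawing batch_size elements from a finite source (hypothetical; not draugr's actual implementation, which may cycle deterministically instead of sampling):

import random


def batched_recycle_sketch(source, batch_size):
    """Yield batch_size elements drawn from `source`, forever."""
    pool = list(source)
    while True:
        yield random.sample(pool, batch_size)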
Code Example #6
import time

import torch
from torch.utils.data import Dataset

from draugr import batched_recycle, to_tensor_generator  # assumed top-level draugr exports, like inner_map below


def test_d6():
    from torchvision.transforms import transforms
    import numpy
    from draugr import inner_map

    a_transform = transforms.Compose([
        transforms.ToPILImage("RGB"),
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])

    channels_in = 3
    channels_out = 3

    samples = 10
    device = "cuda"
    batches = 3
    batch_size = 32
    data_shape = (batches * batch_size, 256, 256, channels_in)
    batch_shape = torch.Size([batch_size, channels_in, 224, 224])

    class RandomDataset(Dataset):
        """ """
        def __init__(self):
            self.d = numpy.random.sample(data_shape)

        def __len__(self):
            return len(self.d)

        def __getitem__(self, item):
            return a_transform(self.d[item])

    dataloader = torch.utils.data.DataLoader(
        RandomDataset(),
        batch_size=batch_size,
        shuffle=True,
        num_workers=1,
        pin_memory=True,
    )

    # inner_map applies a_transform to every image inside each recycled batch
    generator = to_tensor_generator(
        inner_map(a_transform,
                  batched_recycle(numpy.random.sample(data_shape),
                                  batch_size)),
        device=device,
        preload_next=True,
    )

    for _ in range(samples):
        s1 = time.time()
        # generator path: transform applied inside each prefetched batch
        for _, a in zip(range(batches), generator):
            assert batch_shape == a.shape, a.shape

        s2 = time.time()

        # DataLoader path: transform applied per item in __getitem__
        for _, a in zip(range(batches), dataloader):
            assert batch_shape == a.shape, a.shape
        s3 = time.time()

        print(f"generator: {s2 - s1}")
        print(f"dataloader: {s3 - s2}")