Example #1
import random

import numpy as np
import torch

import dareblopy as db


def make_dataloader(cfg,
                    logger,
                    dataset,
                    GPU_batch_size,
                    local_rank,
                    numpy=False):
    class BatchCollator(object):
        def __init__(self, device=torch.device("cpu")):
            self.device = device
            self.flip = cfg.DATASET.FLIP_IMAGES
            self.numpy = numpy

        def __call__(self, batch):
            with torch.no_grad():
                x, = batch
                if self.flip:
                    # Randomly mirror each image along the width axis by
                    # slicing with a stride of -1 (or leaving it as-is).
                    flips = [(slice(None, None, None), slice(None, None, None),
                              slice(None, None, random.choice([-1, None])))
                             for _ in range(x.shape[0])]
                    x = np.array([img[flip] for img, flip in zip(x, flips)])
                if self.numpy:
                    return x
                # Move the batch to the target device as a float32 tensor.
                x = torch.tensor(x,
                                 requires_grad=True,
                                 device=torch.device(self.device),
                                 dtype=torch.float32)
                return x

    batches = db.data_loader(iter(dataset), BatchCollator(local_rank),
                             len(dataset) // GPU_batch_size)

    return batches
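
A minimal usage sketch for the loader above (not from the original source; cfg, dataset, and train_step are hypothetical stand-ins, and cfg is assumed to carry a DATASET.FLIP_IMAGES flag):

batches = make_dataloader(cfg, logger, dataset,
                          GPU_batch_size=32, local_rank=0)
for x in batches:
    # x is a float32 tensor on device 0 with requires_grad=True
    train_step(x)  # hypothetical training step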
Example #2
import dareblopy as db


def reading_tf_records_from_dareblopy():
    # Assumes `filenames` (a list of TFRecord paths) and `batch_size`
    # are defined by the surrounding benchmark.
    features = {'data': db.FixedLenFeature([3, 256, 256], db.uint8)}
    iterator = db.data_loader(db.ParsedTFRecordsDatasetIterator(
        filenames, features, batch_size, 64),
                              worker_count=6)
    records = []
    for batch in iterator:
        records += batch
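
The benchmark above leaves filenames and batch_size undefined; a minimal setup sketch (the file name is a placeholder, not from the original source):

filenames = ['data.tfrecords']  # placeholder TFRecords path
batch_size = 32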
Example #3
import dareblopy as db


def reading_tf_records_from_dareblopy_withoutdecoding():
    # db.string keeps each record's payload as raw, undecoded bytes.
    features = {'data': db.FixedLenFeature([], db.string)}
    iterator = db.data_loader(db.ParsedTFRecordsDatasetIterator(
        filenames, features, batch_size, 128),
                              worker_count=6)
    records = []
    for batch in iterator:
        records += batch
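
A hedged follow-up sketch: assuming each raw byte string packs a 3x256x256 uint8 image (the shape used in the decoded variant above), the records could be turned back into arrays with NumPy; this decoding step is an assumption, not part of the original example.

import numpy as np

images = [np.frombuffer(raw, dtype=np.uint8).reshape(3, 256, 256)
          for raw in records]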
Example #4
import dareblopy as db


def test_ParsedTFRecordsDatasetIterator_and_dataloader():
    features = {
        # 'shape': db.FixedLenFeature([3], db.int64),
        'data': db.FixedLenFeature([3, 256, 256], db.uint8)
    }
    iterator = db.data_loader(db.ParsedTFRecordsDatasetIterator(
        filenames, features, 32, 64),
                              worker_count=6)
    records = []
    for batch in iterator:
        records += batch
Example #5
import torch

import dareblopy as db


def make_dataloader(cfg, logger, dataset, GPU_batch_size, local_rank):
    class BatchCollator(object):
        def __init__(self, device=torch.device("cpu")):
            self.device = device

        def __call__(self, batch):
            with torch.no_grad():
                x, = batch
                # Move the batch to the target device as a float32 tensor.
                x = torch.tensor(x, requires_grad=True, device=torch.device(self.device), dtype=torch.float32)
                return x

    batches = db.data_loader(iter(dataset), BatchCollator(local_rank), len(dataset) // GPU_batch_size)
    return batches
Example #6
import math
import random

import numpy as np
import torch

import dareblopy as db


def make_imagenet_dataloader(cfg,
                             logger,
                             dataset,
                             GPU_batch_size,
                             target_size,
                             local_rank,
                             do_random_crops=True):
    class BatchCollator(object):
        def __init__(self, device=torch.device("cpu")):
            self.device = device
            self.flip = cfg.DATASET.FLIP_IMAGES
            self.size = target_size
            # Source images are stored one eighth larger than the target
            # (e.g. 288 for a 256 target) so a random crop can be taken.
            # Cast to int so the size arithmetic stays integral.
            p = int(math.log2(target_size))
            self.source_size = 2 ** p + 2 ** (p - 3)
            self.do_random_crops = do_random_crops

        def __call__(self, batch):
            with torch.no_grad():
                x, = batch

                if self.do_random_crops:
                    # Cut a random self.size x self.size window out of each
                    # oversized source image.
                    images = []
                    for im in x:
                        deltax = self.source_size - self.size
                        deltay = self.source_size - self.size
                        offx = np.random.randint(deltax + 1)
                        offy = np.random.randint(deltay + 1)
                        im = im[:, offy:offy + self.size,
                                offx:offx + self.size]
                        images.append(im)
                    x = np.stack(images)

                if self.flip:
                    # Randomly mirror each image along the width axis.
                    flips = [(slice(None, None, None), slice(None, None, None),
                              slice(None, None, random.choice([-1, None])))
                             for _ in range(x.shape[0])]
                    x = np.array([img[flip] for img, flip in zip(x, flips)])
                x = torch.tensor(x,
                                 requires_grad=True,
                                 device=torch.device(self.device),
                                 dtype=torch.float32)

                return x

    batches = db.data_loader(iter(dataset), BatchCollator(local_rank),
                             len(dataset) // GPU_batch_size)

    return batches
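
To make the crop geometry concrete, a quick check of the sizes involved (this assumes target_size is a power of two, which the log2 arithmetic requires):

import math

target_size = 256                       # assumed power of two
p = int(math.log2(target_size))         # 8
source_size = 2 ** p + 2 ** (p - 3)     # 256 + 32 = 288
max_offset = source_size - target_size  # crop offsets drawn from [0, 32]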
Example #7
import torch

import dareblopy as db


def make_dataloader(cfg, logger, dataset, GPU_batch_size, gpu_num=0):
    class BatchCollator(object):
        def __init__(self, device=torch.device("cpu")):
            self.device = device

        def __call__(self, batch):
            with torch.no_grad():
                x, y = batch
                # print(x.shape)  # labels, shape (-1, 3)
                # print(y.shape)  # data, shape (-1, 3, 4, 4)
                image_batch = torch.tensor(y,
                                           requires_grad=True,
                                           device=torch.device(self.device),
                                           dtype=torch.float32)
                return image_batch

    batches = db.data_loader(iter(dataset), BatchCollator(gpu_num),
                             len(dataset))
    # batches = db.data_loader(iter(dataset), len(dataset))
    return batches
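
The collator above discards the label half of the batch; a hypothetical variant sketch (not from the original source) that returns both tensors:

import torch

class LabeledBatchCollator(object):
    # Hypothetical variant of BatchCollator that keeps the labels too.
    def __init__(self, device=torch.device("cpu")):
        self.device = device

    def __call__(self, batch):
        with torch.no_grad():
            x, y = batch  # x: labels (-1, 3); y: data (-1, 3, 4, 4)
            labels = torch.tensor(x, device=self.device, dtype=torch.float32)
            images = torch.tensor(y, device=self.device, dtype=torch.float32)
            return labels, images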