def build_dataloader(batch_size, dataset_dir, cfg):
    """Build the training DataLoader for the dataset named by ``cfg.dataset``.

    Supported values are ``"VOC2012"`` and ``"Cityscapes"``; anything else
    raises ``ValueError``. Samples are drawn through an ``Infinite``-wrapped
    random sampler — presumably so a step-based training loop never exhausts
    the iterator (confirm against the caller).
    """
    if cfg.dataset == "VOC2012":
        trainset = dataset.PascalVOC(
            dataset_dir, cfg.data_type, order=["image", "mask"]
        )
    elif cfg.dataset == "Cityscapes":
        trainset = dataset.Cityscapes(
            dataset_dir, "train", mode="gtFine", order=["image", "mask"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.dataset))

    # Joint image/mask augmentation pipeline applied per sample.
    augmentations = T.Compose(
        transforms=[
            T.RandomHorizontalFlip(0.5),
            T.RandomResize(scale_range=(0.5, 2)),
            T.RandomCrop(
                output_size=(cfg.img_height, cfg.img_width),
                padding_value=[0, 0, 0],
                # 255 pads the mask — presumably the "ignore" label; verify
                # against the loss function.
                padding_maskvalue=255,
            ),
            T.Normalize(mean=cfg.img_mean, std=cfg.img_std),
            T.ToMode(),
        ],
        order=["image", "mask"],
    )

    endless_sampler = Infinite(RandomSampler(trainset, batch_size, drop_last=True))
    return DataLoader(
        trainset,
        sampler=endless_sampler,
        transform=augmentations,
        num_workers=2,
    )
def build_dataloader(dataset_dir, cfg):
    """Build a batch-size-1 validation DataLoader for ``cfg.dataset``.

    Supported datasets are ``"VOC2012"`` and ``"Cityscapes"``; any other
    value raises ``ValueError``. Only normalization is applied at eval time;
    image geometry is left untouched.
    """
    if cfg.dataset == "VOC2012":
        valset = EvalPascalVOC(dataset_dir, "val", order=["image", "mask", "info"])
    elif cfg.dataset == "Cityscapes":
        valset = dataset.Cityscapes(
            dataset_dir, "val", mode="gtFine", order=["image", "mask", "info"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.dataset))

    normalize = T.Normalize(
        mean=cfg.img_mean, std=cfg.img_std, order=["image", "mask"]
    )
    return DataLoader(
        valset,
        sampler=InferenceSampler(valset, 1),  # fixed batch size of 1 for eval
        transform=normalize,
        num_workers=2,
    )
def build_dataloader(dataset_dir, cfg):
    """Build the validation DataLoader and return it with the dataset size.

    Args:
        dataset_dir: root directory of the dataset on disk.
        cfg: config object; reads ``DATASET``, ``VAL_BATCHES``, ``IMG_MEAN``,
            ``IMG_STD``, and ``DATA_WORKERS``.

    Returns:
        ``(val_dataloader, num_samples)`` where ``num_samples`` is the length
        of the validation dataset.

    Raises:
        ValueError: if ``cfg.DATASET`` is not "VOC2012" or "Cityscapes".
    """
    if cfg.DATASET == "VOC2012":
        val_dataset = EvalPascalVOC(
            dataset_dir, "val", order=["image", "mask", "info"]
        )
    elif cfg.DATASET == "Cityscapes":
        val_dataset = dataset.Cityscapes(
            dataset_dir, "val", mode="gtFine", order=["image", "mask", "info"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.DATASET))

    val_sampler = data.SequentialSampler(val_dataset, cfg.VAL_BATCHES)
    val_dataloader = data.DataLoader(
        val_dataset,
        sampler=val_sampler,
        # Eval-time transform: normalization only, no geometric augmentation.
        transform=T.Normalize(
            mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=["image", "mask"]
        ),
        num_workers=cfg.DATA_WORKERS,
    )
    # len() is the idiomatic spelling; calling __len__() directly was a smell.
    return val_dataloader, len(val_dataset)
def build_dataloader(batch_size, dataset_dir, cfg):
    """Build the training DataLoader and return it with the dataset size.

    Args:
        batch_size: samples per batch for the random sampler.
        dataset_dir: root directory of the dataset on disk.
        cfg: config object; reads ``DATASET``, ``DATA_TYPE``, ``IMG_HEIGHT``,
            ``IMG_WIDTH``, ``IMG_MEAN``, and ``IMG_STD``.

    Returns:
        ``(train_dataloader, num_samples)`` where ``num_samples`` is the
        length of the training dataset.

    Raises:
        ValueError: if ``cfg.DATASET`` is not "VOC2012" or "Cityscapes".
    """
    if cfg.DATASET == "VOC2012":
        train_dataset = dataset.PascalVOC(
            dataset_dir, cfg.DATA_TYPE, order=["image", "mask"]
        )
    elif cfg.DATASET == "Cityscapes":
        train_dataset = dataset.Cityscapes(
            dataset_dir, "train", mode="gtFine", order=["image", "mask"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.DATASET))

    train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
    train_dataloader = data.DataLoader(
        train_dataset,
        sampler=train_sampler,
        # Joint image/mask augmentation pipeline applied per sample.
        transform=T.Compose(
            transforms=[
                T.RandomHorizontalFlip(0.5),
                T.RandomResize(scale_range=(0.5, 2)),
                T.RandomCrop(
                    output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH),
                    padding_value=[0, 0, 0],
                    # 255 pads the mask — presumably the "ignore" label;
                    # verify against the loss function.
                    padding_maskvalue=255,
                ),
                T.Normalize(mean=cfg.IMG_MEAN, std=cfg.IMG_STD),
                T.ToMode(),
            ],
            order=["image", "mask"],
        ),
        num_workers=0,
    )
    # len() is the idiomatic spelling; calling __len__() directly was a smell.
    return train_dataloader, len(train_dataset)