def __new__(cls, root, train=True, transform=None, download=False):
    """Build the segmentation dataset for the requested split.

    Returns an SBD ``train_noval`` dataset when *train* is true, otherwise
    the Pascal VOC ``val`` segmentation dataset, both rooted at *root*.
    """
    if not train:
        return datasets.VOCSegmentation(
            root,
            image_set='val',
            download=download,
            transforms=transform,
        )
    return datasets.SBDataset(
        root,
        image_set='train_noval',
        mode='segmentation',
        download=download,
        transforms=transform,
    )
def __new__(cls, root, train=True, transform=None, download=False):
    """Build the segmentation dataset for the requested split.

    The SBD and VOC data live in sibling directories (``sbd`` / ``voc``)
    under the parent of *root*; *train* selects SBD ``train_noval``,
    otherwise VOC ``val``.
    """
    base = pathlib.Path(root).parent
    if not train:
        return VD.VOCSegmentation(
            base / "voc",
            image_set="val",
            transforms=transform,
            download=download,
        )
    return VD.SBDataset(
        base / "sbd",
        image_set='train_noval',
        mode='segmentation',
        transforms=transform,
        download=download,
    )
def sbdataset():
    """Collect the download configs triggered by constructing SBDataset.

    The dataset factory is deferred in a callable so that the download
    only happens inside ``collect_download_configs``.
    """
    def _build():
        return datasets.SBDataset(ROOT, download=True)

    return collect_download_configs(
        _build,
        name="SBDataset",
        file="voc",
    )
data_root_dir = os.environ["DATA_PATH"] def get_summary_transforms(): transforms = [] transforms.append(T.ToTensor()) transforms.append( T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])) return T.Compose(transforms) sbd_train = datasets.SBDataset(data_root_dir + "/sbd/", image_set='train', mode='segmentation', download=False, transforms=get_summary_transforms()) sbd_val = datasets.SBDataset(data_root_dir + "/sbd/", image_set='val', mode='segmentation', download=False, transforms=get_summary_transforms()) cityscapes_train = datasets.Cityscapes(data_root_dir + "/cityscapes/", split='train', mode='fine', target_type='semantic', transforms=get_summary_transforms()) cityscapes_val = datasets.Cityscapes(data_root_dir + "/cityscapes/", split='val',