def __init__(self, path=None, shuffle=True):
    """Set up the SVHN dataset group and load its train/test splits.

    Args:
        path: Optional root directory handed to ``DatasetGroup``.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    DatasetGroup.__init__(self, 'svhn', path=path)
    # The SVHN "extra" training set is deliberately not used.
    self.train_on_extra = False
    self.image_shape = (32, 32, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True):
    """Set up the MNIST dataset group and load its splits.

    Args:
        path: Optional root directory handed to ``DatasetGroup``.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    DatasetGroup.__init__(self, 'mnist', path)
    # Grayscale 28x28 digits; labels are scalar class ids.
    self.image_shape = (28, 28, 1)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True):
    """Set up the DAGM 10-to-4 dataset group and load its splits.

    Args:
        path: Root directory of the datasets. When None, falls back to
            the historical hard-coded Windows location.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    # Bug fix: `path` used to be silently ignored in favor of the
    # hard-coded location below; honor it when the caller provides one.
    data_path = path if path is not None else "D:/Dataset/Domain_Adaptation/"
    DatasetGroup.__init__(self, 'DAGM-10-to-4', data_path)
    # Grayscale 256x256 texture images; labels are scalar class ids.
    self.image_shape = (256, 256, 1)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True):
    """Set up the SVHN-to-MNIST dataset group and load its splits.

    Args:
        path: Root directory of the datasets. When None, falls back to
            the historical hard-coded Windows location.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    # Bug fix: `path` used to be silently ignored in favor of the
    # hard-coded location below; honor it when the caller provides one.
    data_path = path if path is not None else "D:/Dataset/Domain_Adaptation/"
    DatasetGroup.__init__(self, 'SVHN-to-MNIST', data_path)
    # RGB 32x32 images; labels are scalar class ids.
    self.image_shape = (32, 32, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True):
    """Set up the MLCC 1st-to-2nd dataset group and load its splits.

    Args:
        path: Root directory of the datasets. When None, falls back to
            the historical hard-coded Windows location.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    # Bug fix: `path` used to be silently ignored in favor of the
    # hard-coded location below; honor it when the caller provides one.
    data_path = path if path is not None else "D:/Dataset/Domain_Adaptation/"
    DatasetGroup.__init__(self, 'MLCC_1st_to_2nd', data_path)
    # RGB 160x160 images; labels are scalar class ids.
    self.image_shape = (160, 160, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True, download=False):
    """Build the VisDA-2017 synthetic dataset group from on-disk file lists.

    Args:
        path: Root directory containing a ``train`` subdirectory with
            one file-list per split (image path and integer label per line).
        shuffle: Whether batches drawn from the splits are shuffled.
        download: Accepted for interface compatibility.

    NOTE(review): ``download`` is accepted but the super call always
    forces ``download=False`` — confirm whether that is intentional.
    """
    DatasetGroup.__init__(self, 'vda2017s', path=path, download=False)
    self.image_shape = (384, 216, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self.base_path = os.path.join(path, 'train')
    # self.file_names maps split name -> list-file name; presumably
    # provided by the class/base class — not visible here.
    for split, list_name in self.file_names.items():
        with open(os.path.join(self.base_path, list_name)) as handle:
            records = [entry.split() for entry in handle.readlines()]
        # Each record is "<relative image path> <integer label>".
        img_file_names, labels = zip(*records)
        full_file_names = [
            os.path.join(self.base_path, rel) for rel in img_file_names
        ]
        split_dataset = FilenameDataset(
            full_file_names, list(map(int, labels)), 'png')
        setattr(self, split, split_dataset)
def __init__(self, path=None, shuffle=True, download=False):
    """Build the VisDA-2017 COCO-style dataset group from on-disk file lists.

    Args:
        path: Root directory containing a ``validation`` subdirectory with
            one file-list per split (image path and integer label per line).
        shuffle: Whether batches drawn from the splits are shuffled.
        download: Accepted for interface compatibility.

    NOTE(review): ``download`` is accepted but the super call always
    forces ``download=False`` — confirm whether that is intentional.
    """
    DatasetGroup.__init__(self, 'vda2017coco', path=path, download=False)
    # Image dimensions vary per file; only the channel count is fixed.
    self.image_shape = (None, None, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self.base_path = os.path.join(path, 'validation')
    # self.file_names maps split name -> list-file name; presumably
    # provided by the class/base class — not visible here.
    for split, list_name in self.file_names.items():
        with open(os.path.join(self.base_path, list_name)) as handle:
            records = [entry.split() for entry in handle.readlines()]
        # Each record is "<relative image path> <integer label>".
        img_file_names, labels = zip(*records)
        full_file_names = [
            os.path.join(self.base_path, rel) for rel in img_file_names
        ]
        int_label_list = list(map(int, labels))
        # Grow the class count to cover the largest label seen so far;
        # self.num_classes is presumably initialized by the base class.
        self.num_classes = max(self.num_classes, np.max(int_label_list) + 1)
        setattr(self, split,
                FilenameDataset(full_file_names, int_label_list, 'jpeg'))
    logging.info('detected %d classes in input data', self.num_classes)
def __init__(self, path=None, shuffle=True):
    """Set up the product dataset group and load its splits.

    Args:
        path: Optional root directory handed to ``DatasetGroup``.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    DatasetGroup.__init__(self, 'product', path=path)
    # RGB 256x256 images; labels are scalar class ids.
    self.image_shape = (256, 256, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True, download=True):
    """Set up the USPS dataset group and load its splits.

    Args:
        path: Optional root directory handed to ``DatasetGroup``.
        shuffle: Whether batches drawn from the splits are shuffled.
        download: Forwarded to ``DatasetGroup`` to control fetching.
    """
    DatasetGroup.__init__(self, 'usps', path=path, download=download)
    # Grayscale 16x16 digits; labels are scalar class ids.
    self.image_shape = (16, 16, 1)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()
def __init__(self, path=None, shuffle=True):
    """Set up the Caltech dataset group and load its splits.

    Args:
        path: Optional root directory handed to ``DatasetGroup``.
        shuffle: Whether batches drawn from the splits are shuffled.
    """
    DatasetGroup.__init__(self, 'Caltech', path=path)
    # RGB 224x224 images; labels are scalar class ids.
    self.image_shape = (224, 224, 3)
    self.label_shape = ()
    self.shuffle = shuffle
    self._load_datasets()