def __init__(self, positive=True, limit_sentences=None, dataset_cache_dir=None, dataset_name=None):
    """Initialize the dataset, forwarding cache/limit options to the base class.

    Args:
        positive: flag stored on the instance (presumably marks the positive
            split/class — confirm against callers).
        limit_sentences: optional cap on loaded sentences, passed through.
        dataset_cache_dir: optional cache directory, passed through.
        dataset_name: optional dataset name, passed through.
    """
    Dataset.__init__(
        self,
        limit_sentences=limit_sentences,
        dataset_cache_dir=dataset_cache_dir,
        dataset_name=dataset_name,
    )
    self.positive = positive
def __init__(self, args, root, split, labelpath, cachedir, transform=None, target_transform=None, test_gap=50):
    """Build the dataset for the given split, memoizing the prepared index on disk.

    Args:
        args: namespace providing ``label_file`` (path to the class-label file).
        root: root directory of the raw data.
        split: dataset split name; also part of the cache filename.
        labelpath: path to the per-video label JSON.
        cachedir: directory where the prepared index pickle is stored.
        transform: optional input transform.
        target_transform: optional target transform.
        test_gap: sampling gap forwarded to the base class.
    """
    Dataset.__init__(self, test_gap, split)
    self.num_classes = 174  # fixed number of target classes for this dataset
    self.transform = transform
    self.target_transform = target_transform
    # Class-name -> integer-id mapping, then per-video labels resolved with it.
    self.cls2int = self.parse_something_labels(args.label_file)
    self.labels = self.parse_something_json(labelpath, self.cls2int)
    self.root = root
    # The expensive _prepare scan is cached on disk, keyed by class and split.
    cachename = f"{cachedir}/{self.__class__.__name__}_{split}.pkl"
    self._data = cache(cachename)(self._prepare)(root, self.labels, split)
def __init__(self, txt_fn="data/train_phototourism_ms.txt", style2style=False):
    """Load the phototourism image list and configure stylization paths.

    Args:
        txt_fn: text file with one image name per line.
        style2style: if True, pair style-transferred images with each other
            (flag stored for use elsewhere).

    Fix: the image-list file is now read inside a ``with`` block so the file
    handle is closed deterministically instead of being leaked to the GC.
    """
    Dataset.__init__(self)
    self.txt_fn = txt_fn
    self.style2style = style2style
    # NOTE(review): machine-specific absolute paths — consider parameterizing.
    self.path_orig_img = "/ssd/data/phototourism/orig"
    self.path_style_img = "/data/datasets/phototourism/style_transfer_all"
    self.style2fnames = get_style2fnames()  # a dirty hack (original author's note)
    self.crop_size = 192
    with open(self.txt_fn) as f:
        self.imgs = [line.rstrip("\n") for line in f]
    self.nimg = len(self.imgs)
    # One pair per image; kept identical to nimg as in the original code.
    self.npairs = len(self.imgs)
def __init__(self, args, root, split, labelpath, cachedir, transform=None, target_transform=None, input_size=224, test_gap=10):
    """Build the AVA dataset for the given split, caching the prepared index.

    Args:
        args: argument namespace (unused here beyond the shared signature).
        root: root directory of the raw video/frame data.
        split: dataset split name; also part of the cache filename.
        labelpath: path to the AVA label CSV.
        cachedir: directory where the prepared index pickle is stored.
        transform: optional input transform.
        target_transform: optional target transform.
        input_size: spatial size of the model input.
        test_gap: sampling gap forwarded to the base class.

    Fixes: use a dict comprehension instead of ``dict()`` over a generator
    (idiomatic, ruff C402) and derive the label range from ``num_classes``
    rather than repeating the literal 80.
    """
    Dataset.__init__(self, test_gap, split)
    self.num_classes = 80
    self.transform = transform
    self.target_transform = target_transform
    # CSV class ids are the 1-based strings "1".."80"; map them to 0..79.
    self.cls2int = {str(x + 1): x for x in range(self.num_classes)}
    self.labels = self.parse_ava_csv(labelpath, self.cls2int)
    self.root = root
    self.train_gap = 64  # number of frames per training clip — confirm with sampler
    self.input_size = input_size
    # The expensive _prepare scan is cached on disk, keyed by class and split.
    cachename = '{}/{}_{}.pkl'.format(cachedir, self.__class__.__name__, split)
    self._data = cache(cachename)(self._prepare)(root, self.labels, split)
def __init__(self, args, root, split, label_path, cachedir, transform=None, target_transform=None, input_size=224, test_gap=25, train_gap=4):
    """Build the Kinetics dataset for the given split, caching the prepared index.

    Args:
        args: namespace providing ``train_file`` (used to derive class labels).
        root: root directory of the raw data.
        split: dataset split name; also part of the cache filename.
        label_path: path to the label CSV for this split.
        cachedir: directory where the prepared index pickle is stored.
        transform: optional input transform.
        target_transform: optional target transform.
        input_size: spatial size of the model input.
        test_gap: sampling gap forwarded to the base class.
        train_gap: training sampling gap stored on the instance.
    """
    Dataset.__init__(self, test_gap, split)
    self.num_classes = 400  # fixed number of target classes for this dataset
    self.transform = transform
    self.target_transform = target_transform
    # Class labels always come from the training file so ids stay consistent
    # across splits; then resolve this split's labels against that mapping.
    self.cls2int = self.parse_kinetics_labels(args.train_file)
    self.labels = self.parse_kinetics_csv(label_path, self.cls2int)
    self.root = root
    self.train_gap = train_gap
    self.input_size = input_size
    # The expensive _prepare scan is cached on disk, keyed by class and split.
    cachename = f"{cachedir}/{self.__class__.__name__}_{split}.pkl"
    self._data = cache(cachename)(self._prepare)(root, self.labels, split)
def __init__(self):
    """Initialize from the ISBSG10 source: data plus decision/objective metadata."""
    src = ISBSG10
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Kemerer source: data plus decision/objective metadata."""
    src = Kemerer
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Desharnais source: data plus decision/objective metadata."""
    src = Desharnais
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the China source: data plus decision/objective metadata."""
    src = China
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Finnish source: data plus decision/objective metadata."""
    src = Finnish
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Maxwell source: data plus decision/objective metadata."""
    src = Maxwell
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self, content):
    """Wrap already-loaded content in a Dataset with no cache/limit configuration.

    Args:
        content: pre-built content object, stored verbatim on ``self.c``.
    """
    Dataset.__init__(
        self,
        limit_sentences=None,
        dataset_cache_dir=None,
        dataset_name=None,
    )
    self.c = content
def __init__(self):
    """Initialize from the Kitchenhamm source: data plus decision/objective metadata."""
    src = Kitchenhamm
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Albrecht source: data plus decision/objective metadata."""
    src = Albrecht
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self):
    """Initialize from the Miyazaki source: data plus decision/objective metadata."""
    src = Miyazaki
    Dataset.__init__(
        self,
        data=src.data(),
        dec_meta=src.decision_meta(),
        obj_meta=src.objective_meta(),
    )
def __init__(self, root="/ssd/data/phototourism/orig", txt_fn="data/train_phototourism_ms.txt"):
    """Load the phototourism image list rooted at ``root``.

    Args:
        root: directory containing the original images
            (NOTE(review): machine-specific default — consider requiring it).
        txt_fn: text file with one image name per line.

    Fix: the image-list file is now read inside a ``with`` block so the file
    handle is closed deterministically instead of being leaked to the GC.
    """
    Dataset.__init__(self)
    self.root = root
    self.txt_fn = txt_fn
    with open(self.txt_fn) as f:
        self.imgs = [line.rstrip("\n") for line in f]
    self.nimg = len(self.imgs)