def set_dataset(self):
    """Build the training and validation datasets and their loaders.

    Training filenames come from ``splits/<opt.split>/train_files.txt``;
    validation always uses the eigen split's ``test_files.txt``.
    Sets ``self.train_loader``, ``self.val_loader``, ``self.train_num``,
    ``self.val_num`` and ``self.num_total_steps``.
    """
    splits_root = os.path.dirname(os.path.dirname(__file__))
    fpath = os.path.join(splits_root, "splits", self.opt.split, "{}_files.txt")
    test_fpath = os.path.join(splits_root, "splits", "eigen", "test_files.txt")

    train_filenames = readlines(fpath.format("train"))
    val_filenames = readlines(test_fpath)

    train_dataset = KittiDataset(
        self.opt.data_path, self.opt.gt_path, train_filenames,
        self.opt.height, self.opt.width,
        crph=self.opt.crph, crpw=self.opt.crpw, is_train=True,
        predang_path=self.opt.predang_path,
        semanticspred_path=self.opt.semanticspred_path
    )
    val_dataset = KittiDataset(
        self.opt.data_path, self.opt.val_gt_path, val_filenames,
        self.opt.height, self.opt.width,
        crph=self.opt.crph, crpw=self.opt.crpw, is_train=False,
        predang_path=self.opt.predang_path,
        semanticspred_path=self.opt.semanticspred_path
    )

    self.train_loader = DataLoader(
        train_dataset, self.opt.batch_size, shuffle=True,
        num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)
    # NOTE(review): drop_last=True on the validation loader discards the
    # final partial batch, so some val samples are never evaluated — confirm
    # this is intentional.
    self.val_loader = DataLoader(
        val_dataset, self.opt.batch_size, shuffle=False,
        num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)

    self.train_num = len(train_dataset)
    self.val_num = len(val_dataset)
    # Steps per epoch (integer batches) times number of epochs.
    self.num_total_steps = self.train_num // self.opt.batch_size * self.opt.num_epochs
def set_dataset(self):
    """Build the validation dataset/loader and the training-index mapping.

    Reads ``splits/<opt.split>/test_files.txt``, creates a batch-size-1
    validation loader, and loads ``training_mapping.txt`` to populate
    ``self.indmapping`` via ``self.get_indmapping``.
    Sets ``self.val_filenames``, ``self.val_loader``, ``self.val_num``
    and ``self.indmapping``.
    """
    test_fpath = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        "splits", self.opt.split, "test_files.txt")
    self.val_filenames = readlines(test_fpath)

    val_dataset = KittiDataset(
        self.opt.data_path, self.opt.val_gt_path, self.val_filenames,
        self.opt.height, self.opt.width,
        crph=self.opt.crph, crpw=self.opt.crpw, is_train=False,
        predang_path=self.opt.predang_path,
        semanticspred_path=self.opt.semanticspred_path,
        threeinput=self.opt.threeinput)

    # Evaluate one sample at a time (batch_size=1).
    self.val_loader = DataLoader(
        val_dataset, batch_size=1, shuffle=False,
        num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)
    self.val_num = len(val_dataset)

    # Map between KITTI raw frames and the official training split indices.
    mapping = readlines(
        os.path.join(self.opt.trainmapping_fold, 'training_mapping.txt'))
    self.indmapping = self.get_indmapping(mapping)
def set_dataset(self):
    """Build a validation loader over the *training* split's file list.

    Uses ``splits/<opt.split>/train_files.txt`` (relative to the current
    working directory) with ``is_train=False``, presumably to evaluate on
    training frames. Sets ``self.val_loader`` and ``self.val_num``.
    """
    fpath = os.path.join(os.getcwd(), "splits", self.opt.split, "{}_files.txt")
    val_filenames = readlines(fpath.format("train"))

    val_dataset = KittiDataset(
        self.opt.data_path, self.opt.gt_path, val_filenames,
        self.opt.height, self.opt.width,
        crph=self.opt.crph, crpw=self.opt.crpw, is_train=False,
        semanticspred_path=self.opt.semanticspred_path)

    # NOTE(review): shuffle=True and drop_last=True are unusual for a
    # validation loader (non-deterministic order, tail samples skipped) —
    # confirm this is intentional. num_workers=0 keeps loading in-process.
    self.val_loader = DataLoader(
        val_dataset, self.opt.batch_size, shuffle=True,
        num_workers=0, pin_memory=True, drop_last=True)
    self.val_num = len(val_dataset)
def set_dataset(self):
    """Build the validation dataset/loader from the split's test files.

    Reads ``splits/<opt.split>/test_files.txt`` relative to the current
    working directory and creates a sequential, non-dropping loader so
    every test sample is evaluated exactly once.
    Sets ``self.val_loader`` and ``self.val_num``.
    """
    fpath = os.path.join(os.getcwd(), "splits", self.opt.split, "{}_files.txt")
    val_filenames = readlines(fpath.format("test"))

    val_dataset = KittiDataset(
        self.opt.data_path, self.opt.gt_path, val_filenames,
        self.opt.height, self.opt.width,
        crph=self.opt.crph, crpw=self.opt.crpw, is_train=False,
        instancepred_path=self.opt.instancepred_path
    )

    # drop_last=False so the final partial batch is still evaluated.
    self.val_loader = DataLoader(
        val_dataset, self.opt.batch_size, shuffle=False,
        num_workers=0, pin_memory=True, drop_last=False)
    self.val_num = len(val_dataset)