def load_data(self):
    """Build the train/test DataLoaders from the configured feeder classes.

    A loader is created for each split whose feeder args are non-empty and
    stored in ``self.data_loader`` under 'train' / 'test'.
    """
    self.data_loader = dict()
    # Worker count scales with the number of GPUs in use.
    workers = self.arg.num_worker * torchlight.ngpu(self.arg.device)
    if self.arg.train_feeder_args:
        feeder_cls = import_class(self.arg.train_feeder)
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.train_feeder_args),
            batch_size=self.arg.batch_size,
            shuffle=True,
            pin_memory=True,  # set True when memory is abundant
            num_workers=workers,
            drop_last=True,
            worker_init_fn=init_seed)
    if self.arg.test_feeder_args:
        feeder_cls = import_class(self.arg.test_feeder)
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            pin_memory=True,
            num_workers=workers,
            drop_last=False,
            worker_init_fn=init_seed)
def load_data(self):
    """Build DataLoaders; the train loader rebalances classes with a
    WeightedRandomSampler (samples with label == 1 weighted 40x).
    """
    feeder_cls = import_class(self.arg.feeder)
    if 'debug' not in self.arg.train_feeder_args:
        self.arg.train_feeder_args['debug'] = self.arg.debug
    self.data_loader = dict()
    if self.arg.phase == 'train':
        train_set = feeder_cls(**self.arg.train_feeder_args)
        # NOTE(review): this iterates the whole dataset once just to read the
        # labels, which loads every sample up front.
        sample_weights = [40 if label == 1 else 1 for _, label in train_set]
        balanced_sampler = WeightedRandomSampler(
            sample_weights,
            num_samples=len(sample_weights),
            replacement=True)
        # shuffle must stay unset: it is mutually exclusive with a sampler.
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=train_set,
            batch_size=self.arg.batch_size,
            num_workers=self.arg.num_worker * torchlight.ngpu(self.arg.device),
            sampler=balanced_sampler)
    if self.arg.test_feeder_args:
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker * torchlight.ngpu(self.arg.device))
def load_data(self):
    """Build the train loader and, when gallery args are set, the
    gallery/probe evaluation loaders.
    """
    # Use the file_system strategy for sharing tensors across worker processes.
    torch.multiprocessing.set_sharing_strategy('file_system')
    feeder_cls = import_class(self.arg.feeder)
    if 'debug' not in self.arg.train_feeder_args:
        self.arg.train_feeder_args['debug'] = self.arg.debug
    self.data_loader = dict()
    num_workers = self.arg.num_worker * torchlight.ngpu(self.arg.device)
    if self.arg.phase == 'train':
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.train_feeder_args),
            batch_size=self.arg.batch_size,
            shuffle=True,
            num_workers=num_workers,
            drop_last=True)
    if self.arg.gallery_feeder_args:
        # Evaluation pairs a gallery split with a probe split; both are
        # built only when gallery feeder args are configured.
        for split, feeder_args in (('gallery', self.arg.gallery_feeder_args),
                                   ('probe', self.arg.probe_feeder_args)):
            self.data_loader[split] = torch.utils.data.DataLoader(
                dataset=feeder_cls(**feeder_args),
                batch_size=self.arg.test_batch_size,
                shuffle=False,
                num_workers=num_workers)
def load_data(self):
    """Instantiate the feeder class and build the train/test DataLoaders."""
    feeder_cls = import_class(self.arg.feeder)
    if 'debug' not in self.arg.train_feeder_args:
        self.arg.train_feeder_args['debug'] = self.arg.debug
    self.data_loader = dict()
    workers = self.arg.num_worker * torchlight.ngpu(self.arg.device)
    if self.arg.phase == 'train':
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.train_feeder_args),
            batch_size=self.arg.batch_size,
            shuffle=True,  # randomize sample order each epoch
            num_workers=workers,
            drop_last=True)  # keep every training batch the same size
    if self.arg.test_feeder_args:
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=feeder_cls(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=workers)
def load_data(self):
    """Populate self.data_loader with 'train' and/or 'test' loaders.

    The train loader is built only in the 'train' phase; the test loader is
    built whenever test feeder args are present.
    """
    dataset_cls = import_class(self.arg.feeder)
    train_args = self.arg.train_feeder_args
    if 'debug' not in train_args:
        train_args['debug'] = self.arg.debug
    n_workers = self.arg.num_worker * torchlight.ngpu(self.arg.device)
    loaders = {}
    if self.arg.phase == 'train':
        loaders['train'] = torch.utils.data.DataLoader(
            dataset=dataset_cls(**train_args),
            batch_size=self.arg.batch_size,
            shuffle=True,
            num_workers=n_workers,
            drop_last=True)
    if self.arg.test_feeder_args:
        loaders['test'] = torch.utils.data.DataLoader(
            dataset=dataset_cls(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=n_workers)
    self.data_loader = loaders
def load_data(self):
    """Construct the train/test DataLoaders from the configured feeder class."""
    Feeder = import_class(self.arg.feeder)
    if 'debug' not in self.arg.train_feeder_args:
        self.arg.train_feeder_args['debug'] = self.arg.debug
    self.data_loader = dict()
    if self.arg.phase == 'train':
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.train_feeder_args),
            batch_size=self.arg.batch_size,
            # Shuffle so samples are drawn in a different order each epoch.
            shuffle=True,
            # Number of worker subprocesses for data loading, scaled by GPU count.
            num_workers=self.arg.num_worker * torchlight.ngpu(self.arg.device),
            # Drop the final partial batch so every batch matches batch_size.
            drop_last=True)
    if self.arg.test_feeder_args:
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker * torchlight.ngpu(self.arg.device))
args = parser.parse_args()
device = 'cuda:0'
#%% TBD: Load the dataset
# NOTE(review): this span is truncated mid-statement at the end (the DEBUG
# branch's DataLoader call continues beyond this chunk); code left unchanged.
if DEBUG == False:
    # data, labels, data_train, labels_train, data_test, labels_test = \
    # loader.load_data(data_path, ftype, coords, joints, cycles=cycles)
    # num_classes = np.unique(labels_train).shape[0]
    # Non-debug path: build shuffled train/test loaders from TrainTestLoader.
    data_loader_train_test = list()
    data_loader_train_test.append(
        torch.utils.data.DataLoader(dataset=loader.TrainTestLoader(train=True),
                                    batch_size=args.batch_size,
                                    shuffle=True,
                                    num_workers=args.num_worker * torchlight.ngpu(device),
                                    drop_last=True))
    data_loader_train_test.append(
        torch.utils.data.DataLoader(
            dataset=loader.TrainTestLoader(train=False),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_worker * torchlight.ngpu(device),
            drop_last=True))
    data_loader_train_test = dict(train=data_loader_train_test[0],
                                  test=data_loader_train_test[1])
else:
    # Debug path: presumably swaps in the NYU Depth V2 dataset — TODO confirm,
    # the remainder of this statement is outside the visible chunk.
    data_loader_train_test = list()
    data_loader_train_test.append(
        torch.utils.data.DataLoader(
            dataset=loader_test.NYU_Depth_V2(train=True),
def load_data(self):
    """Build train/val or test DataLoaders from the configured roidb datasets.

    In the 'train' phase the training roidb is loaded (plus an optional
    validation roidb when ``val_dataset`` is set); otherwise the test roidb
    is loaded. Populates ``self.data_loader``, ``self.classes`` and the
    imdb/roidb/size bookkeeping attributes.

    Raises:
        AssertionError: if the dataset name required by the current phase
            is not provided.
    """
    if 'debug' not in self.arg.train_feeder_args:
        self.arg.train_feeder_args['debug'] = self.arg.debug
    self.need_val = False
    # YOLO-style models do not use an explicit background class.
    self.need_bg = not ('yolo' in self.arg.model)
    imdb_name = self.arg.train_dataset
    imdbval_name = self.arg.val_dataset
    imdbtest_name = self.arg.test_dataset
    self.train_size = 0
    self.val_size = 0
    self.test_size = 0
    self.imdb, self.imdb_val, self.imdb_test = None, None, None
    Feeder = import_class(self.arg.feeder)
    # NOTE(review): the resolved sampler class is never used below; kept only
    # for its import side effect — confirm before removing.
    sampler = import_class(self.arg.sampler) if self.arg.sampler else None
    self.data_loader = dict()
    self.classes = None
    if self.arg.phase == 'train':
        # Fixed: the assert message was `print(...)`, which evaluates to None,
        # so a failing assert raised AssertionError with no message (and the
        # typo "provoided").
        assert imdb_name is not None, 'Training data is not provided.'
        imdb, roidb, ratio_list, ratio_index = combined_roidb(
            imdb_name, **vars(self.arg))
        # Drop the background class (index 0) when the model has none.
        self.classes = imdb.classes if self.need_bg else imdb.classes[1:]
        num_classes = (imdb.num_classes if self.need_bg
                       else imdb.num_classes - 1)
        self.imdb = imdb
        self.roidb = roidb
        self.train_size = len(roidb)
        self.data_loader['train'] = torch.utils.data.DataLoader(
            dataset=Feeder(roidb, num_classes, need_bg=self.need_bg,
                           **vars(self.arg)),
            batch_size=self.arg.train_args['ims_per_batch'],
            num_workers=self.arg.num_worker * torchlight.ngpu(self.arg.device),
            drop_last=True)
        if imdbval_name:
            imdb_val, roidb_val, ratio_list_val, ratio_index_val = \
                combined_roidb(imdbval_name, training=False, **vars(self.arg))
            self.need_val = True
            self.val_size = len(roidb_val)
            self.imdb_val = imdb_val
            self.roidb_val = roidb_val
            self.data_loader['val'] = torch.utils.data.DataLoader(
                dataset=Feeder(roidb_val, num_classes, need_bg=self.need_bg,
                               training=False, **vars(self.arg)),
                batch_size=self.arg.test_args['ims_per_batch'],
                shuffle=False,
                num_workers=self.arg.num_worker *
                torchlight.ngpu(self.arg.device))
    else:
        assert imdbtest_name is not None, 'Test data is not provided.'
        imdb_test, roidb_test, ratio_list_test, ratio_index_test = \
            combined_roidb(imdbtest_name, training=False, **vars(self.arg))
        self.classes = (imdb_test.classes if self.need_bg
                        else imdb_test.classes[1:])
        num_classes = (imdb_test.num_classes if self.need_bg
                       else imdb_test.num_classes - 1)
        self.test_size = len(roidb_test)
        self.imdb_test = imdb_test
        self.roidb_test = roidb_test
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=Feeder(roidb_test, num_classes, need_bg=self.need_bg,
                           training=False, **vars(self.arg)),
            batch_size=self.arg.test_args['ims_per_batch'],
            shuffle=False,
            num_workers=0,
            pin_memory=True)