def load_data_test(self, config_dict):
    """Build the test-split DataLoader described by *config_dict*.

    Args:
        config_dict: configuration dict; reads the keys
            'dataset_folder_test', 'img_type', 'input_types',
            'label_types_test', 'useCamBatches', 'batch_size_test',
            and 'num_workers'.

    Returns:
        A ``torch.utils.data.DataLoader`` yielding collated
        (input, label) batches from the collected test dataset.
    """
    dataset = collected_dataset.CollectedDataset(
        data_folder=config_dict['dataset_folder_test'],
        img_type=config_dict['img_type'],
        input_types=config_dict['input_types'],
        label_types=config_dict['label_types_test'])

    # useSubjectBatches=0: no subject-paired sampling at test time.
    # NOTE(review): every_nth_frame is pinned to 100 here; the original
    # commented out config_dict['every_nth_frame'] — preserved as-is,
    # confirm whether the config value should be restored.
    batch_sampler = collected_dataset.CollectedDatasetSampler(
        data_folder=config_dict['dataset_folder_test'],
        useSubjectBatches=0,
        useCamBatches=config_dict['useCamBatches'],
        batch_size=config_dict['batch_size_test'],
        randomize=True,
        every_nth_frame=100)

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=config_dict['num_workers'],
        pin_memory=False,
        collate_fn=utils_data.default_collate_with_string)

    # Removed: an unreachable `if 0:` debug branch that pickled a sample
    # batch to '../examples/test_set.pickl' and called
    # IPython.embed()/exit() — dead code that could never execute.
    return loader
def load_data_test(self, config_dict):
    """Create the DataLoader used for evaluation on the test split.

    Reads 'dataset_folder_test', 'input_types', 'label_types_test',
    'useCamBatches', 'batch_size_test', and 'every_nth_frame' from
    *config_dict* and returns a ready-to-iterate DataLoader.
    """
    test_folder = config_dict['dataset_folder_test']

    dataset = collected_dataset.CollectedDataset(
        data_folder=test_folder,
        input_types=config_dict['input_types'],
        label_types=config_dict['label_types_test'])

    # Subject-paired batching is disabled (useSubjectBatches=0) for
    # evaluation; camera batching follows the config.
    sampler = collected_dataset.CollectedDatasetSampler(
        data_folder=test_folder,
        useSubjectBatches=0,
        useCamBatches=config_dict['useCamBatches'],
        batch_size=config_dict['batch_size_test'],
        randomize=True,
        every_nth_frame=config_dict['every_nth_frame'])

    # num_workers is fixed at 0 (single-process loading) rather than
    # taken from the config, as in the original implementation.
    return torch.utils.data.DataLoader(
        dataset,
        batch_sampler=sampler,
        num_workers=0,
        pin_memory=False,
        collate_fn=utils_data.default_collate_with_string)
def load_data_test(self, config_dict):
    """Build the H36M multi-view test DataLoader.

    Args:
        config_dict: configuration dict; reads 'input_types',
            'label_types_test', 'useCamBatches', 'batch_size_test',
            and 'num_workers'. Optionally 'dataset_folder_test' to
            override the dataset location.

    Returns:
        A DataLoader wrapped in ``utils_data.PostFlattenInputSubbatchTensor``
        so that per-camera sub-batches are flattened into the batch dim.
    """
    # Generalized: the dataset folder was a hard-coded absolute path;
    # it is now configurable, with the historical path kept as the
    # default so existing configs behave identically.
    data_folder = config_dict.get(
        'dataset_folder_test',
        '/cvlabdata1/home/rhodin/code/humanposeannotation/python/pytorch_human_reconstruction/TMP/H36M-MultiView-test')

    dataset = collected_dataset.CollectedDataset(
        data_folder=data_folder,
        input_types=config_dict['input_types'],
        label_types=config_dict['label_types_test'],
        useSubjectBatches=0,  # no subject pairing at test time
        useCamBatches=config_dict['useCamBatches'],
        randomize=False)

    testloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config_dict['batch_size_test'],
        shuffle=False,
        num_workers=config_dict['num_workers'],
        pin_memory=False,
        drop_last=True,
        collate_fn=utils_data.default_collate_with_string)

    testloader = utils_data.PostFlattenInputSubbatchTensor(testloader)
    return testloader
def load_data_train(self, config_dict):
    """Build the H36M multi-view training DataLoader.

    Args:
        config_dict: configuration dict; reads 'input_types',
            'label_types_train', 'useSubjectBatches', 'useCamBatches',
            'batch_size_train', and 'num_workers'. Optional keys:
            'useSequentialFrames' (defaults to 0) and
            'dataset_folder_train' to override the dataset location.

    Returns:
        A shuffled DataLoader wrapped in
        ``utils_data.PostFlattenInputSubbatchTensor`` so per-subject /
        per-camera sub-batches are flattened into the batch dimension.
    """
    # Generalized: the dataset folder was a hard-coded absolute path;
    # it is now configurable, with the historical path kept as the
    # default so existing configs behave identically.
    data_folder = config_dict.get(
        'dataset_folder_train',
        '/cvlabdata1/home/rhodin/code/humanposeannotation/python/pytorch_human_reconstruction/TMP/H36M-MultiView-train')

    dataset = collected_dataset.CollectedDataset(
        data_folder=data_folder,
        input_types=config_dict['input_types'],
        label_types=config_dict['label_types_train'],
        useSubjectBatches=config_dict['useSubjectBatches'],
        useCamBatches=config_dict['useCamBatches'],
        useSequentialFrames=config_dict.get('useSequentialFrames', 0),
        randomize=True)

    trainloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config_dict['batch_size_train'],
        shuffle=True,
        num_workers=config_dict['num_workers'],
        pin_memory=False,
        drop_last=True,  # keep batch shapes constant across iterations
        collate_fn=utils_data.default_collate_with_string)

    trainloader = utils_data.PostFlattenInputSubbatchTensor(trainloader)
    return trainloader