def setup_human36m_dataloaders(config):
    """Build and return the Human3.6M validation DataLoader described by ``config``.

    Optional config attributes (``pred_results_path``, ``image_shape``,
    ``ignore_cameras``, ``crop``, ``opt.val_batch_size``) fall back to
    defaults when absent.
    """
    val_cfg = config.dataset.val

    # Dataset: always constructed in test mode (train=False, test=True).
    val_dataset = human36m.Human36MMultiViewDataset(
        h36m_root=val_cfg.h36m_root,
        pred_results_path=getattr(val_cfg, "pred_results_path", None),
        train=False,
        test=True,
        image_shape=getattr(config, "image_shape", (256, 256)),
        labels_path=val_cfg.labels_path,
        with_damaged_actions=val_cfg.with_damaged_actions,
        retain_every_n_frames_in_test=val_cfg.retain_every_n_frames_in_test,
        scale_bbox=val_cfg.scale_bbox,
        kind=config.kind,
        undistort_images=val_cfg.undistort_images,
        ignore_cameras=getattr(val_cfg, "ignore_cameras", []),
        crop=getattr(val_cfg, "crop", True),
    )

    # Loader: val batch size falls back to the training batch size;
    # drop_last=False so every validation sample is evaluated.
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=getattr(config.opt, "val_batch_size", config.opt.batch_size),
        shuffle=val_cfg.shuffle,
        collate_fn=dataset_utils.make_collate_fn(
            randomize_n_views=val_cfg.randomize_n_views,
            min_n_views=val_cfg.min_n_views,
            max_n_views=val_cfg.max_n_views,
        ),
        num_workers=val_cfg.num_workers,
        worker_init_fn=dataset_utils.worker_init_fn,
        pin_memory=True,
        drop_last=False,
    )

    return val_dataloader
def setup_human36m_dataloaders(config, is_train, distributed_train):
    """Build Human3.6M train and validation DataLoaders.

    Args:
        config: experiment config with ``dataset.train`` / ``dataset.val``
            sections, ``opt`` (batch sizes) and ``kind``.
        is_train: when False, only the validation loader is built.
        distributed_train: when True (and ``is_train``), wrap the training
            dataset in a ``DistributedSampler``.

    Returns:
        Tuple ``(train_dataloader, val_dataloader, train_sampler)``;
        ``train_dataloader`` and ``train_sampler`` are ``None`` when
        ``is_train`` is False.
    """
    train_dataloader = None
    # BUG FIX: train_sampler was previously bound only inside the
    # `if is_train:` branch, so the return statement raised
    # UnboundLocalError whenever is_train was False.
    train_sampler = None

    if is_train:
        # train
        train_dataset = human36m.Human36MMultiViewDataset(
            h36m_root=config.dataset.train.h36m_root,
            pred_results_path=config.dataset.train.pred_results_path if hasattr(config.dataset.train, "pred_results_path") else None,
            train=True,
            test=False,
            image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
            labels_path=config.dataset.train.labels_path,
            with_damaged_actions=config.dataset.train.with_damaged_actions,
            scale_bbox=config.dataset.train.scale_bbox,
            kind=config.kind,
            undistort_images=config.dataset.train.undistort_images,
            ignore_cameras=config.dataset.train.ignore_cameras if hasattr(config.dataset.train, "ignore_cameras") else [],
            crop=config.dataset.train.crop if hasattr(config.dataset.train, "crop") else True,
        )

        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed_train else None

        train_dataloader = DataLoader(
            train_dataset,
            batch_size=config.opt.batch_size,
            # DistributedSampler and shuffle are mutually exclusive in
            # DataLoader, so shuffling is disabled whenever a sampler is used.
            shuffle=config.dataset.train.shuffle and (train_sampler is None),  # debatable
            sampler=train_sampler,
            collate_fn=dataset_utils.make_collate_fn(
                randomize_n_views=config.dataset.train.randomize_n_views,
                min_n_views=config.dataset.train.min_n_views,
                max_n_views=config.dataset.train.max_n_views,
            ),
            num_workers=config.dataset.train.num_workers,
            worker_init_fn=dataset_utils.worker_init_fn,
            pin_memory=True,
        )

    # val
    val_dataset = human36m.Human36MMultiViewDataset(
        h36m_root=config.dataset.val.h36m_root,
        pred_results_path=config.dataset.val.pred_results_path if hasattr(config.dataset.val, "pred_results_path") else None,
        train=False,
        test=True,
        image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
        labels_path=config.dataset.val.labels_path,
        with_damaged_actions=config.dataset.val.with_damaged_actions,
        retain_every_n_frames_in_test=config.dataset.val.retain_every_n_frames_in_test,
        scale_bbox=config.dataset.val.scale_bbox,
        kind=config.kind,
        undistort_images=config.dataset.val.undistort_images,
        ignore_cameras=config.dataset.val.ignore_cameras if hasattr(config.dataset.val, "ignore_cameras") else [],
        crop=config.dataset.val.crop if hasattr(config.dataset.val, "crop") else True,
    )

    val_dataloader = DataLoader(
        val_dataset,
        # Validation batch size falls back to the training batch size.
        batch_size=config.opt.val_batch_size if hasattr(config.opt, "val_batch_size") else config.opt.batch_size,
        shuffle=config.dataset.val.shuffle,
        collate_fn=dataset_utils.make_collate_fn(
            randomize_n_views=config.dataset.val.randomize_n_views,
            min_n_views=config.dataset.val.min_n_views,
            max_n_views=config.dataset.val.max_n_views,
        ),
        num_workers=config.dataset.val.num_workers,
        worker_init_fn=dataset_utils.worker_init_fn,
        pin_memory=True,
    )

    return train_dataloader, val_dataloader, train_sampler
crop=config.dataset.val.crop if hasattr(config.dataset.val, "crop") else True, norm_image=False, frames_split_file=config.opt.frames_split_file if hasattr( config.opt, "frames_split_file") else None) elif config.kind == "human36m" or config.kind == "h36m": dataset = human36m.Human36MMultiViewDataset( h36m_root=config.dataset.val.h36m_root, pred_results_path=config.dataset.val.pred_results_path if hasattr( config.dataset.val, "pred_results_path") else None, train=False, test=True, image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256), labels_path=config.dataset.val.labels_path, with_damaged_actions=config.dataset.val.with_damaged_actions, retain_every_n_frames_in_test=config.dataset.val. retain_every_n_frames_in_test, scale_bbox=config.dataset.val.scale_bbox, kind=config.kind, undistort_images=config.dataset.val.undistort_images, ignore_cameras=config.dataset.val.ignore_cameras if hasattr( config.dataset.val, "ignore_cameras") else [], crop=config.dataset.val.crop if hasattr(config.dataset.val, "crop") else True, norm_image=False) else: raise NotImplementedError(f"{config.kind} dataset not implemented") # Load results pkl file with open(results_file, "rb") as f: data = pickle.load(f)