Example #1
0
def render_checkpoint(checkpoint_file):
    """Render the eval split with the model stored in *checkpoint_file*.

    Depending on ``args.out_mode`` the predictions are written either as a
    single mp4 video or as one PNG per frame into ``args.save``.

    Args:
        checkpoint_file: path-like object (must expose ``.stem``) pointing
            at a serialized torch model loadable with ``torch.load``.
    """
    dataset = EvalDataset(args.data, args.test, args.view_direction)
    dataloader = DataLoader(dataset,
                            batch_size=args.batch,
                            shuffle=False,
                            num_workers=4,
                            collate_fn=EvalDataset.get_collect_fn(
                                args.view_direction))

    # Load the serialized model and switch to inference mode.
    model = torch.load(checkpoint_file)
    model = model.to('cuda')
    model.eval()
    torch.set_grad_enabled(False)

    writer = None
    if args.out_mode == 'video':
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        writer = cv2.VideoWriter(
            os.path.join(args.save, f'{checkpoint_file.stem}_render.mp4'),
            fourcc, 16, (dataset.width, dataset.height), True)
    print(f'Rendering {checkpoint_file.stem}')
    for samples in tqdm.tqdm(dataloader):
        if args.view_direction:
            uv_maps, extrinsics, masks, idxs = samples
            RGB_texture, preds = model(uv_maps.cuda(), extrinsics.cuda())
        else:
            uv_maps, masks, idxs = samples
            RGB_texture, preds = model(uv_maps.cuda())

        preds = preds.cpu()
        preds.masked_fill_(masks, 0)  # zero out invalid (masked) pixels

        # save result
        if args.out_mode == 'video':
            preds = np.clip(preds.numpy(), -1.0, 1.0)
            for i in range(len(idxs)):
                image = img_as_ubyte(preds[i])
                image = np.transpose(image, (1, 2, 0))  # CHW -> HWC
                # OpenCV expects BGR frames; the RGB2BGR constant is the
                # semantically correct (and numerically identical) code.
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                writer.write(image)
        else:
            for i in range(len(idxs)):
                image = transforms.ToPILImage()(preds[i])
                # BUG FIX: the original mixed an f-string with .format(),
                # so .format(idxs[i]) was a no-op and the batch-local index
                # i was written instead of the global frame index idxs[i].
                image.save(
                    os.path.join(
                        args.save,
                        f'{checkpoint_file.stem}_{idxs[i]}_render.png'))
    if writer is not None:
        writer.release()  # flush and finalize the mp4 container
Example #2
0
def evaluate(config, args):
    """Build the requested model, evaluate it, and print the AP metrics.

    An ONNX runtime model is used when ``args.onnx`` is given; otherwise a
    torch model is constructed from ``args.cfg`` / ``args.weight``.
    """
    if args.onnx:
        use_cuda = args.device == 'cuda'
        model = onnx_model_for_eval(args.onnx, use_cuda)
    else:
        model = torch_model_for_eval(args.cfg, args.weight,
                                     device=args.device)
    evaluator = Evaluator(model, EvalDataset(config), config)
    tools.print_metric(evaluator.evaluate())
Example #3
0
 def test(self):
     """Evaluate the new model on the eval split and print the AP metrics."""
     loader = torch.utils.data.DataLoader(
         EvalDataset(self.cfg),
         batch_size=None,
         shuffle=False,
         num_workers=self._num_workers,
         pin_memory=True,
         collate_fn=lambda batch: batch,
     )
     evaluator = Evaluator(self.new_model, loader, self.cfg)
     self.new_model.eval()
     # Print the metric table.
     tools.print_metric(evaluator.evaluate())
Example #4
0
 def init_dataset(self):
     """Create the train/eval datasets and wrap them in DataLoaders."""
     train_set = TrainDataset(self.config)
     eval_set = EvalDataset(self.config)
     self.train_dataloader = torch.utils.data.DataLoader(
         train_set,
         batch_size=self._train_batch_size,
         shuffle=False,
         num_workers=self._num_workers,
         pin_memory=True,
         collate_fn=collate_batch,
     )
     # The eval dataset assembles batches internally, hence
     # batch_size=None and an identity collate function.
     self.eval_dataloader = torch.utils.data.DataLoader(
         eval_set,
         batch_size=None,
         shuffle=False,
         num_workers=self._num_workers,
         pin_memory=True,
         collate_fn=lambda batch: batch,
     )
     print(f'{train_set.length} images for train.')
     print(f'{eval_set.length} images for evaluate.')
Example #5
0
# Output mode: one mp4 video or one PNG per frame.
parser.add_argument('--out_mode', type=str, default=config.OUT_MODE, choices=('video', 'image'))
parser.add_argument('--fps', type=int, default=config.FPS)
args = parser.parse_args()


if __name__ == '__main__':

    # Resolve the checkpoint path and bail out early if it is missing.
    checkpoint_file = os.path.join(args.checkpoint, args.load)
    if not os.path.exists(checkpoint_file):
        print('checkpoint not exists!')
        sys.exit()

    # Make sure the output directory exists.
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    dataset = EvalDataset(args.data, args.test, args.view_direction)
    dataloader = DataLoader(dataset, batch_size=args.batch, shuffle=False, num_workers=4, collate_fn=EvalDataset.get_collect_fn(args.view_direction))

    # Load the serialized model and switch to inference mode.
    model = torch.load(checkpoint_file)
    model = model.to('cuda')
    model.eval()
    torch.set_grad_enabled(False)

    if args.out_mode == 'video':
        # NOTE(review): frame rate is hard-coded to 16 even though an
        # --fps argument is parsed above -- confirm which is intended.
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        writer = cv2.VideoWriter(os.path.join(args.save, 'render.mp4'), fourcc, 16,
                                     (dataset.width, dataset.height), True)
    print('Evaluating started')
    # NOTE(review): the body of this render loop is truncated in this excerpt.
    for samples in tqdm.tqdm(dataloader):
        if args.view_direction:
            uv_maps, extrinsics, masks, idxs = samples
Example #6
0
                    type=str,
                    default=config.OUT_MODE,
                    choices=('video', 'image'))
args = parser.parse_args()

if __name__ == '__main__':

    # Resolve the checkpoint path and bail out early if it is missing.
    checkpoint_file = os.path.join(args.checkpoint, args.load)
    if not os.path.exists(checkpoint_file):
        print('checkpoint not exists!')
        sys.exit()

    # Make sure the output directory exists.
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    # View-direction conditioning is disabled in this variant (False).
    dataset = EvalDataset(args.data, args.test, False)
    dataloader = DataLoader(dataset,
                            batch_size=args.batch,
                            shuffle=False,
                            num_workers=4,
                            collate_fn=EvalDataset.get_collect_fn(False))

    # Load the serialized model and switch to inference mode.
    model = torch.load(checkpoint_file)
    model = model.to('cuda')
    model.eval()
    torch.set_grad_enabled(False)

    if args.out_mode == 'video':
        # NOTE(review): the render loop that would write frames to this
        # writer (and release it) lies outside this excerpt.
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        writer = cv2.VideoWriter(os.path.join(args.save, 'render.mp4'), fourcc,
                                 16, (dataset.width, dataset.height), True)