def getLoader(db, f_test=None):
    # select the evaluation dataloader for the requested dataset;
    # f_test is only needed for the synthetic test set
    if db == 'syn':
        loader = dataloader.TestLoader(f_test)
    elif db == 'human36':
        loader = dataloader.Human36Loader()
    elif db == 'cad120':
        loader = dataloader.Cad120Loader()
    elif db == 'biwi':
        loader = dataloader.BIWILoader()
    elif db == 'biwiid':
        loader = dataloader.BIWIIDLoader()
    else:
        raise ValueError(f"unknown dataset: {db}")
    return loader
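# A minimal alternative sketch of the same dispatch using a dict, assuming the
# same dataloader module and classes are importable; factories are wrapped in
# lambdas so only the requested loader is instantiated. getLoaderDict is a
# hypothetical name, not part of the original code.
import dataloader

def getLoaderDict(db, f_test=None):
    factories = {
        'syn':     lambda: dataloader.TestLoader(f_test),
        'human36': lambda: dataloader.Human36Loader(),
        'cad120':  lambda: dataloader.Cad120Loader(),
        'biwi':    lambda: dataloader.BIWILoader(),
        'biwiid':  lambda: dataloader.BIWIIDLoader(),
    }
    if db not in factories:
        raise ValueError(f"unknown dataset: {db}")
    return factories[db]()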
def test(model, modelin=args.model,outfile=args.out,feature_transform=args.feat_trans): # define model, dataloader, 3dmm eigenvectors, optimization method if modelin != "": model.load_state_dict(torch.load(modelin)) model.eval() # mean shape and eigenvectors for 3dmm M = 100 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float() lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() shape = mu_lm # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_depth = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i*100 for i in range(4,21)] for f_test in f_vals: # create dataloader data = dataloader.TestLoader(f_test) error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100; N = 68; batch_size = 1; for k in range(len(data)): batch = data[k] x_cam_gt = batch['x_cam_gt'] x_w_gt = batch['x_w_gt'] f_gt = batch['f_gt'] x_img = batch['x_img'].unsqueeze(0) x_img_gt = batch['x_img_gt'] T_gt = batch['T_gt'] sequence = batch['x_img'].reshape((M,N,2)).permute(0,2,1) all_depth.append(np.mean(T_gt[:,2])) all_f.append(f_gt.numpy()[0]) one = torch.ones(batch_size,M*N,1) x_img_one = torch.cat([x_img,one],dim=2) # run the model out,_,_ = model(x_img_one.permute(0,2,1)) betas = out[:,:199] fout = torch.relu(out[:,199]) if torch.any(fout < 1): fout = fout+1 # apply 3DMM model from predicted parameters alpha_matrix = torch.diag(betas.squeeze()) shape_cov = torch.mm(lm_eigenvec,alpha_matrix) s = shape_cov.sum(1).view(68,3) #shape = (mu_lm + s) #shape = mu_lm #shape[:,2] = shape[:,2]*-1 # run epnp using predicted shape and intrinsics K = torch.zeros((3,3)) K[0,0] = fout; K[1,1] = fout; K[2,2] = 1; K[0,2] = 0; K[1,2] = 0; Xc,R,T = util.EPnP(sequence,shape,K) # get errors reproj_errors2 = util.getReprojError2(sequence,shape,R,T,K) reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T) rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(f_gt - fout) / f_gt allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print(f"f/sequence: {f_test}/{k} | f/fgt: {fout[0].item():.3f}/{f_gt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}") #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) #end for all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = 
np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile,matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
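# A minimal sketch (hypothetical shapes, random data) of how the predicted 3DMM
# coefficients are combined with the landmark eigenvectors in the test function
# above: the diag/mm/sum sequence is just the matrix-vector product
# mu + E @ betas, reshaped to 68 landmarks x 3 coordinates.
import torch

N_LM, N_BASIS = 68, 199                       # landmarks, basis vectors (as in the loader)
mu_lm = torch.randn(N_LM, 3)                  # mean landmark shape
lm_eigenvec = torch.randn(N_LM * 3, N_BASIS)  # 3DMM basis, one column per coefficient
betas = torch.randn(N_BASIS)                  # predicted coefficients

# diag/mm/sum form used above
s_diag = torch.mm(lm_eigenvec, torch.diag(betas)).sum(1).view(N_LM, 3)
# equivalent direct matrix-vector product
s_mv = torch.mv(lm_eigenvec, betas).view(N_LM, 3)
shape = mu_lm + s_mv

assert torch.allclose(s_diag, s_mv, atol=1e-5)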
def test(modelin=args.model,outfile=args.out,optimize=args.opt,ft=args.ft): # define model, dataloader, 3dmm eigenvectors, optimization method calib_net = PointNet(n=1,feature_transform=ft) sfm_net = PointNet(n=199,feature_transform=ft) if modelin != "": calib_path = os.path.join('model','calib_' + modelin) sfm_path = os.path.join('model','sfm_' + modelin) calib_net.load_state_dict(torch.load(calib_path)) sfm_net.load_state_dict(torch.load(sfm_path)) calib_net.eval() sfm_net.eval() # mean shape and eigenvectors for 3dmm M = 100 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach() mu_lm[:,2] = mu_lm[:,2]*-1 lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach() sigma = torch.from_numpy(data3dmm.sigma).float().detach() sigma = torch.diag(sigma.squeeze()) lm_eigenvec = torch.mm(lm_eigenvec, sigma) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_fpred = [] all_depth = [] out_shape = [] out_f = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i*100 for i in range(4,15)] # set random seed for reproducibility of test set np.random.seed(0) torch.manual_seed(0) for f_test in f_vals: # create dataloader loader = dataloader.TestLoader(f_test) f_pred = [] shape_pred = [] error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100; N = 68; batch_size = 1; for j,data in enumerate(loader): if j >= 10: break # load the data x_cam_gt = data['x_cam_gt'] shape_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] depth = torch.norm(x_cam_gt.mean(2),dim=1) all_depth.append(depth.numpy()) all_f.append(fgt.numpy()[0]) ptsI = x_img.reshape((M,N,2)).permute(0,2,1) x = x_img.unsqueeze(0).permute(0,2,1) # run the model f = calib_net(x) + 300 betas = sfm_net(x) betas = betas.squeeze(0).unsqueeze(-1) shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3) shape = shape - shape.mean(0).unsqueeze(0) # get motion measurement guess K = torch.zeros((3,3)).float() K[0,0] = f K[1,1] = f K[2,2] = 1 km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K) _, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI) error_time = util.getTimeConsistency(shape,R,T) if error_time > 10: mode='walk' else: mode='still' # apply dual optimization if optimize: calib_net.load_state_dict(torch.load(calib_path)) sfm_net.load_state_dict(torch.load(sfm_path)) shape,K,R,T = dualoptimization(x,calib_net,sfm_net,shape_gt=shape_gt,fgt=fgt,mode=mode) f = K[0,0].detach() else: K = torch.zeros(3,3).float() K[0,0] = f K[1,1] = f K[2,2] = 1 km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K) Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI) # get errors reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K,show=False) reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean() rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt # save final prediction f_pred.append(f.detach().cpu().item()) shape_pred.append(shape.detach().cpu().numpy()) all_fpred.append(f.detach().data.numpy()) allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) 
error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print(f"f/sequence: {f_test}/{j} | f/fgt: {f.item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}") avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) out_f.append(np.stack(f_pred)) out_shape.append(np.concatenate(shape_pred,axis=0)) print(f"f_error_rel: {avg_relf:.4f} | rel rmse: {avg_rel3d:.4f} | 2d error: {avg_2d:.4f} | rmse: {avg_3d:.4f} |") # save output out_shape = np.stack(out_shape) out_f = np.stack(out_f) all_f = np.stack(all_f).flatten() all_fpred = np.stack(all_fpred).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_fpred'] = np.array(all_fpred) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) matdata['shape'] = np.stack(out_shape) matdata['f'] = np.stack(out_f) scipy.io.savemat(outfile,matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}") return np.mean(seterror_relf)
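# A small self-contained sketch of the error bookkeeping used in these test
# functions: the per-sequence relative focal error |f_pred - f_gt| / f_gt is
# averaged within each focal-length setting, and the set means are averaged
# again for the final summary. The numbers below are made up for illustration.
import numpy as np

def relative_f_error(f_pred, f_gt):
    return abs(f_pred - f_gt) / f_gt

seterror_relf = []
for f_gt, preds in [(400, [410.0, 395.0]), (1000, [940.0, 1080.0])]:
    errors = [relative_f_error(p, f_gt) for p in preds]
    seterror_relf.append(np.mean(errors))

print(f"MEAN seterror_relf: {np.mean(seterror_relf):.4f}")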
#####################################################
# Start main
#####################################################
folder = '32_a'
acc = '0.6626940598821827'
TEST = False
GAUS = True

# load pretrained encoder/decoder checkpoints
enc_last = torch.load(f'{folder}/enc_last_{acc}.pkl')
encoder = torch.load(f'{folder}/encoder_{acc}.pkl')
decoder = torch.load(f'{folder}/decoder_{acc}.pkl')

if TEST:
    test_loader = hand_DL.TestLoader('test')
    print_loss_total = 0
    bleu_total = 0
    test_len = len(test_loader)
    criterion = nn.CrossEntropyLoss()
    for idx, data in enumerate(test_loader):
        x = torch.from_numpy(data[0]).to(device)
        y = torch.from_numpy(data[1]).to(device)
        loss, bleu = test(x, int(data[2]), y, int(data[3]),
                          encoder, decoder, enc_last, criterion)
        print_loss_total += loss
        bleu_total += bleu
    print_loss_total /= test_len
    bleu_total /= test_len
    print(f'Test loss: {print_loss_total}, bleu: {bleu_total}')
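# A minimal sketch of loading the pickled model checkpoints above on a
# CPU-only machine; the file names follow the folder/accuracy convention used
# in this block, and map_location avoids CUDA deserialization errors.
# (torch.load of a fully pickled module requires the defining class to be
# importable; load_checkpoint is a hypothetical helper name.)
import torch

def load_checkpoint(path, device='cpu'):
    model = torch.load(path, map_location=device)
    model.eval()  # inference mode for testing
    return model

# e.g. encoder = load_checkpoint(f'{folder}/encoder_{acc}.pkl')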
def test_sfm(modelin=args.model, outfile=args.out, optimize=args.opt): # define model, dataloader, 3dmm eigenvectors, optimization method calib_net = CalibrationNet3(n=1) sfm_net = CalibrationNet3(n=199) calib_path = os.path.join('model', 'calib_' + modelin) sfm_path = os.path.join('model', 'sfm_' + modelin) # mean shape and eigenvectors for 3dmm M = 100 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach() mu_lm[:, 2] = mu_lm[:, 2] * -1 lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach() sigma = torch.from_numpy(data3dmm.sigma).float().detach() sigma = torch.diag(sigma.squeeze()) lm_eigenvec = torch.mm(lm_eigenvec, sigma) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_fpred = [] all_depth = [] out_shape = [] out_f = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i * 100 for i in range(4, 15)] for f_test in f_vals: # create dataloader #f_test = 1000 loader = dataloader.TestLoader(f_test) f_pred = [] shape_pred = [] error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100 N = 68 batch_size = 1 training_pred = np.zeros((10, 100, 68, 3)) training_gt = np.zeros((10, 100, 68, 3)) for j, data in enumerate(loader): if j == 10: break # load the data x_cam_gt = data['x_cam_gt'] shape_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] T_gt = data['T_gt'] all_depth.append(np.mean(T_gt[:, 2])) all_f.append(fgt.numpy()[0]) ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1) x = ptsI.unsqueeze(0).permute(0, 2, 1, 3) # test camera calibration #calib_net.load_state_dict(torch.load(calib_path)) opt2 = torch.optim.Adam(sfm_net.parameters(), lr=1e-5) sfm_net.eval() trainfc(sfm_net) f = 2000 for iter in itertools.count(): opt2.zero_grad() # shape prediction betas = sfm_net.forward2(x) betas = torch.clamp(betas, -20, 20) shape = torch.sum(betas * lm_eigenvec, 1) shape = shape.reshape(68, 3) + mu_lm shape = shape - shape.mean(0).unsqueeze(0) rmse = torch.norm(shape_gt - shape, dim=1).mean().detach() K = torch.zeros((3, 3)).float() K[0, 0] = f K[1, 1] = f K[2, 2] = 1 km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K) Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K) error2d = util.getReprojError2(ptsI, shape, R, T, K, show=False, loss='l2') error_time = util.getTimeConsistency(shape, R, T) loss = error2d.mean() + 0.01 * error_time loss.backward() opt2.step() print( f"iter: {iter} | error: {loss.item():.3f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} " ) if iter == 100: break training_pred[j, iter, :, :] = shape.detach().cpu().numpy() training_gt[j, iter, :, :] = shape_gt.detach().cpu().numpy() # get errors reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K, show=False) reproj_errors3 = torch.norm(shape_gt - shape, dim=1).mean() rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt # save final prediction shape_pred.append(shape.detach().cpu().numpy()) allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) 
error_relf.append(f_error.cpu().data.item()) print( f"f/sequence: {f_test}/{j} | f/fgt: {f:.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}" ) avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) out_shape.append(np.stack(shape_pred, axis=0)) print( f"f_error_rel: {avg_relf:.4f} | rel rmse: {avg_rel3d:.4f} | 2d error: {reproj_error.item():.4f} | rmse: {avg_3d:.4f} |" ) all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['training_pred'] = training_pred matdata['training_gt'] = training_gt matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile, matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
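# A self-contained sketch of the loss shape used in test_sfm(): the mean 2D
# reprojection error is combined with a small temporal-consistency penalty
# (weight 0.01 above). The consistency term here, the variance of per-frame
# translations, is only an illustrative stand-in for util.getTimeConsistency,
# whose exact definition is not shown in this file.
import torch

def combined_loss(error2d_per_view, T_per_view, weight=0.01):
    # error2d_per_view: (M,) reprojection error per frame
    # T_per_view:       (M, 3) camera translation per frame
    error_time = T_per_view.var(dim=0).sum()  # penalize jittery trajectories
    return error2d_per_view.mean() + weight * error_time

loss = combined_loss(torch.rand(100), torch.randn(100, 3) * 0.1)
print(loss.item())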
def trainIters(encoder, decoder, enc_last, n_iters, print_every=1000, plot_every=100, learning_rate=0.01): train_loss_total = 0 train_loss_list = [] train_KL_total = 0 train_KL_list = [] test_bleu_list = [] print_loss_total = 0 # Reset every print_every kl_total = 0 bleu_total = 0 encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate) decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate) enc_last_optimizer = optim.SGD(enc_last.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() train_data = hand_DL.TrainLoader('train') train_loader = Data.DataLoader(dataset = train_data, batch_size = 1, shuffle = True, num_workers=2) data_len = len(train_loader) cnt = 0 tot_cnt = 0 highest_score = 0 for iter in range(30, n_iters+1): print(f"Epoch: {iter}") global KLD_weight global highest_bleu global teacher_forcing_ratio KLD_weight = 0 cnt = 0 print_loss_total = 0 bleu_total = 0 kl_total = 0 # ###################### # slope = 0.01 # KLD_weight = iter * slope # if KLD_weight > 1.0: # KLD_weight = 1.0 # ################### # slope = 0.01 # teacher_forcing_ratio = 1.0 - (slope * iter) # if teacher_forcing_ratio <= 0.0: # teacher_forcing_ratio = 0.0 # ###################### for idx, data in enumerate(train_loader): i_cond = data[0][0] t_cond = data[1][0] x = data[0][1].to(device) y = data[1][1].to(device) loss, bleu, kl_loss = train(x, i_cond, y, t_cond, encoder, decoder, enc_last, encoder_optimizer, decoder_optimizer, enc_last_optimizer, criterion) kl_total += kl_loss print_loss_total += loss bleu_total += bleu KLD_constraint = 1 if bleu > highest_bleu: highest_bleu = bleu if bleu > 0.8: teacher_forcing_ratio = 0 elif bleu > 0.7: teacher_forcing_ratio = 0.1 elif bleu > 0.6: teacher_forcing_ratio = 0.2 KLD_constraint = 0.95 elif bleu > 0.5: teacher_forcing_ratio = 0.3 KLD_constraint = 0.9 elif bleu > 0.4: teacher_forcing_ratio = 0.4 KLD_constraint = 0.85 elif bleu > 0.3: teacher_forcing_ratio = 0.5 KLD_constraint = 0.8 elif bleu > 0.2: teacher_forcing_ratio = 0.6 KLD_constraint = 0.6 else: teacher_forcing_ratio = 0.8 KLD_constraint = 0.5 KLD_weight += 0.0001 if KLD_weight > KLD_constraint: KLD_weight = KLD_constraint cnt += 1 tot_cnt += 1 if idx % print_every == 0: train_KL_total += kl_total train_loss_total += print_loss_total print_loss_avg = print_loss_total/cnt bleu_avg = bleu_total/cnt print(f'Iter {idx}/{data_len} loss: {print_loss_avg}, kl_loss: {kl_total/cnt}, bleu: {bleu_avg}') cnt = 0 print_loss_total = 0 bleu_total = 0 kl_total = 0 test_loader = hand_DL.TestLoader('test') print_loss_total = 0 bleu_total = 0 test_len = len(test_loader) for idx, data in enumerate(test_loader): x = torch.from_numpy(data[0]).to(device) y = torch.from_numpy(data[1]).to(device) loss, bleu = test(x, int(data[2]), y, int(data[3]), encoder, decoder, enc_last, criterion) print_loss_total += loss bleu_total += bleu print_loss_total /= test_len bleu_total /= test_len print(f'Test loss: {print_loss_total}, bleu: {bleu_total}') with open(f'{latent_hidden_size}/train_loss', 'a') as f: f.write(f'{str(train_loss_total/tot_cnt)}\n') with open(f'{latent_hidden_size}/train_KL_loss', 'a') as f: f.write(f'{str(train_KL_total/tot_cnt)}\n') with open(f'{latent_hidden_size}/test_bleu', 'a') as f: f.write(f'{str(bleu_total)}\n') test_bleu_list.append(bleu_total) train_loss_list.append(train_loss_total/tot_cnt) train_KL_list.append(train_KL_total/tot_cnt) train_loss_total = 0 train_KL_total = 0 tot_cnt = 0 if bleu_total > highest_score: highest_score = bleu_total torch.save(encoder, 
f'/home/karljackab/DL/lab5/{latent_hidden_size}/encoder_{str(bleu_total)}.pkl') torch.save(decoder, f'/home/karljackab/DL/lab5/{latent_hidden_size}/decoder_{str(bleu_total)}.pkl') torch.save(enc_last, f'/home/karljackab/DL/lab5/{latent_hidden_size}/enc_last_{str(bleu_total)}.pkl') print('save model')
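# A small sketch of the checkpoint-saving step at the end of trainIters(), with
# os.path.join instead of the hard-coded absolute paths; the file-name
# convention (component_{bleu}.pkl) matches the torch.save calls above.
# save_checkpoints is a hypothetical helper name.
import os
import torch

def save_checkpoints(save_dir, bleu, **modules):
    os.makedirs(save_dir, exist_ok=True)
    for name, module in modules.items():
        torch.save(module, os.path.join(save_dir, f'{name}_{bleu}.pkl'))

# e.g. save_checkpoints(str(latent_hidden_size), bleu_total,
#                       encoder=encoder, decoder=decoder, enc_last=enc_last)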
def test(modelin=args.model,outfile=args.out,optimize=args.opt): # define model, dataloader, 3dmm eigenvectors, optimization method calib_net = CalibrationNet3(n=1) sfm_net = CalibrationNet3(n=199) if modelin != "": calib_path = os.path.join('model','calib_' + modelin) sfm_path = os.path.join('model','sfm_' + modelin) calib_net.load_state_dict(torch.load(calib_path)) sfm_net.load_state_dict(torch.load(sfm_path)) calib_net.eval() sfm_net.eval() # mean shape and eigenvectors for 3dmm M = 100 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach() mu_lm[:,2] = mu_lm[:,2]*-1 lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach() sigma = torch.from_numpy(data3dmm.sigma).float().detach() sigma = torch.diag(sigma.squeeze()) lm_eigenvec = torch.mm(lm_eigenvec, sigma) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_fpred = [] all_depth = [] out_shape = [] out_f = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i*100 for i in range(4,15)] for f_test in f_vals: # create dataloader #f_test = 1000 loader = dataloader.TestLoader(f_test) f_pred = [] shape_pred = [] error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100; N = 68; batch_size = 1; for j,data in enumerate(loader): if j == 10: break # load the data x_cam_gt = data['x_cam_gt'] shape_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] T_gt = data['T_gt'] all_depth.append(np.mean(T_gt[:,2])) all_f.append(fgt.numpy()[0]) ptsI = x_img.reshape((M,N,2)).permute(0,2,1) x = ptsI.unsqueeze(0).permute(0,2,1,3) # run the model f = calib_net(x) + 300 betas = sfm_net(x) betas = betas.squeeze(0).unsqueeze(-1) shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3) # additional optimization on initial solution if optimize: calib_net.load_state_dict(torch.load(calib_path)) sfm_net.load_state_dict(torch.load(sfm_path)) calib_net.eval() sfm_net.eval() trainfc(calib_net) trainfc(sfm_net) opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4) opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-2) curloss = 100 for outerloop in itertools.count(): # camera calibration shape = shape.detach() for iter in itertools.count(): opt1.zero_grad() f = calib_net.forward2(x) + 300 K = torch.zeros(3,3).float() K[0,0] = f K[1,1] = f K[2,2] = 1 f_error = torch.mean(torch.abs(f - fgt)) rmse = torch.norm(shape_gt - shape,dim=1).mean() # differentiable PnP pose estimation km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K) Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K) error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2') #error2d = util.getReprojError2_(ptsI,Xc,K,show=True,loss='l2') error_time = util.getTimeConsistency(shape,R,T) loss = error2d.mean() + 0.01*error_time if iter == 5: break loss.backward() opt1.step() print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} ") # sfm f = f.detach() for iter in itertools.count(): opt2.zero_grad() # shape prediction betas = sfm_net.forward2(x) shape = torch.sum(betas * lm_eigenvec,1) shape = shape.reshape(68,3) + mu_lm shape = shape - shape.mean(0).unsqueeze(0) K = torch.zeros((3,3)).float() K[0,0] = f K[1,1] = f K[2,2] = 1 #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach() rmse = torch.norm(shape_gt - shape,dim=1).mean().detach() # differentiable PnP pose estimation 
km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K) Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K) error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2') error_time = util.getTimeConsistency(shape,R,T) loss = error2d.mean() + 0.01*error_time if iter == 5: break if iter > 10 and prev_loss < loss: break else: prev_loss = loss loss.backward() opt2.step() print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} ") # closing condition for outerloop on dual objective if torch.abs(curloss - loss) < 0.01: break curloss = loss else: K = torch.zeros(3,3).float() K[0,0] = f K[1,1] = f K[2,2] = 1 km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K) Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K) all_fpred.append(f.detach().numpy()[0]) # get errors reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K,show=False) reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean() rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt # save final prediction f_pred.append(f.detach().cpu().item()) shape_pred.append(shape.detach().cpu().numpy()) allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print(f"f/sequence: {f_test}/{j} | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}") avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) out_f.append(np.stack(f_pred)) out_shape.append(np.stack(shape_pred,axis=0)) print(f"f_error_rel: {avg_relf:.4f} | rel rmse: {avg_rel3d:.4f} | 2d error: {reproj_error.item():.4f} | rmse: {avg_3d:.4f} |") out_shape = np.stack(out_shape) out_f = np.stack(out_f) all_f = np.stack(all_f).flatten() all_fpred = np.stack(all_fpred).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_fpred'] = np.array(all_fpred) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) matdata['shape'] = np.stack(out_shape) matdata['f'] = np.stack(out_f) scipy.io.savemat(outfile,matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
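# A condensed, self-contained sketch of the alternating scheme used in the test
# above: hold the shape fixed while refining the focal-length branch, then hold
# the focal length fixed while refining the shape branch, and stop the outer
# loop once the objective changes by less than a small tolerance. The two
# "blocks" here are toy quadratic updates standing in for the calib_net/sfm_net
# passes; they are not the actual networks.
import itertools
import torch

f = torch.tensor(500.0, requires_grad=True)   # stands in for the focal-length branch
s = torch.tensor(2.0, requires_grad=True)     # stands in for the shape branch
opt_f = torch.optim.Adam([f], lr=1.0)
opt_s = torch.optim.Adam([s], lr=0.1)

def objective(f, s):
    # toy stand-in for the reprojection + consistency loss
    return (f / 1000.0 - 1.0) ** 2 + (s - 1.0) ** 2

curloss = torch.tensor(100.0)
for outerloop in itertools.count():
    for _ in range(5):                  # calibration block, shape held fixed
        opt_f.zero_grad()
        loss = objective(f, s.detach())
        loss.backward()
        opt_f.step()
    for _ in range(5):                  # shape block, focal length held fixed
        opt_s.zero_grad()
        loss = objective(f.detach(), s)
        loss.backward()
        opt_s.step()
    if torch.abs(curloss - loss) < 0.01:
        break
    curloss = loss.detach()
print(f.item(), s.item())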
def test(model, modelin=args.model, outfile=args.out, feature_transform=args.feat_trans): # define model, dataloader, 3dmm eigenvectors, optimization method if modelin != "": model.load_state_dict(torch.load(modelin)) model.eval() # mean shape and eigenvectors for 3dmm M = 100 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float() lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() shape = mu_lm # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_depth = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i * 100 for i in range(4, 21)] for f_test in f_vals: # create dataloader data = dataloader.TestLoader(f_test) error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100 N = 68 batch_size = 1 for k in range(len(data)): batch = data[4] x_cam_gt = batch['x_cam_gt'] x_w_gt = batch['x_w_gt'] f_gt = batch['f_gt'] x_img = batch['x_img'].unsqueeze(0) x_img_gt = batch['x_img_gt'] T_gt = batch['T_gt'] sequence = batch['x_img'].reshape((M, N, 2)).permute(0, 2, 1) all_depth.append(np.mean(T_gt[:, 2])) all_f.append(f_gt.numpy()[0]) x = x_img.reshape((batch_size, M, N, 2)).permute(0, 3, 2, 1) / 640 x_one = torch.cat( [x.squeeze().permute(2, 0, 1) * 640, torch.ones(M, 1, N)], dim=1) # run the model out = model(x) betas = out[:, :199] fout = torch.relu(out[:, 199]) if torch.any(fout < 1): fout = fout + 1 # apply 3DMM model from predicted parameters alpha_matrix = torch.diag(betas.squeeze()) shape_cov = torch.mm(lm_eigenvec, alpha_matrix) s = shape_cov.sum(1).view(68, 3) #shape = (mu_lm + s) #shape = mu_lm #shape[:,2] = shape[:,2]*-1 # create variables and optimizer for variables as SGD # run epnp using predicted shape and intrinsics varf = Variable(fout, requires_grad=True) K = torch.zeros((3, 3)) K[0, 0] = varf K[1, 1] = varf K[2, 2] = 1 K[0, 2] = 0 K[1, 2] = 0 Xc, R, T = util.EPnP(sequence, shape, K) tmpT = T.detach() tmpR = R.detach() varR = Variable(R, requires_grad=True) varT = Variable(T, requires_grad=True) optimizer = torch.optim.Adam([varR, varT], lr=1e-1) # optimize results for image consistency ferror = [] losses = [] minerror = 10000 for iter in itertools.count(): K = torch.zeros((3, 3)) K[0, 0] = varf K[1, 1] = varf K[2, 2] = 1 K[0, 2] = 0 K[1, 2] = 0 R = varR T = varT Xc, _, _ = util.EPnP(sequence, shape, K) #Xc,R,T = util.EPnP(sequence,shape,K) optimizer.zero_grad() # k inverse kinv = torch.zeros(3, 3).float() kinv[0, 0] = 1 / varf kinv[1, 1] = 1 / varf kinv[2, 2] = 1 # get errors reproj_errors2 = util.getReprojError2(sequence, shape, R, T, K) #reproj_errors3 = util.getReprojError3(x_cam_gt,shape,varR,varT) error_3d = util.getRelReprojError3(x_cam_gt, shape, R, T).mean() #error_3d = util.getPCError(x_cam_gt,x_one.permute(0,2,1),torch.stack(100*[kinv]),mode='l2') error_Rconsistency = util.getRConsistency(R) error_Tconsistency = util.getTConsistency(T) * 0.001 error_3dconsistency = util.get3DConsistency( sequence, shape, kinv, R, T) reproj_error = torch.mean(reproj_errors2) # determine convergence loss = error_3dconsistency if loss < minerror: minerror = loss minf = varf.item() minR = R minT = T convergence = 0 else: convergence += 1 loss.backward() optimizer.step() f = util.solvef(sequence, Xc.detach()) print(f) #if varf < 0: varf = varf*-1 delta = K[0, 0] - varf direction = torch.sign(delta) error_f = torch.abs(varf - f_gt) / f_gt ferror.append(error_f.item()) losses.append(loss.item()) print( f"iter: {iter} | loss: {loss.item():.3f} | 
f/fgt: {varf.item():.3f}/{f_gt.item():.3f} | 2d error: {reproj_error.item():.3f} | error R: {error_Rconsistency.item():.3f} | error T: {error_Tconsistency.item():.3f} | error 3d: {error_3dconsistency.item():.3f} | GT RMSE: {error_3d.item():.3f} | delta: {delta.item():.3f}" ) if convergence == 100: break data = {'ferror': np.array(ferror), 'loss': np.array(losses)} scipy.io.savemat("optimizationlr1.mat", data) quit() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(f_gt - fout) / f_gt allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print( f"f/sequence: {f_test}/{k} | f/fgt: {fout[0].item():.3f}/{f_gt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}" ) #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) #end for all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile, matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
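# A self-contained sketch of the convergence test used in the Adam refinement
# above: keep the best loss (and the parameters that produced it) and stop
# after a fixed number of consecutive non-improving iterations. The quadratic
# objective is a toy stand-in; a hard iteration cap is added as a safety guard.
import itertools
import torch

x = torch.tensor(5.0, requires_grad=True)
optimizer = torch.optim.Adam([x], lr=1e-1)

best_loss = float('inf')
best_x = x.detach().clone()
patience, stall = 100, 0
for it in itertools.count():
    optimizer.zero_grad()
    loss = (x - 1.0) ** 2
    if loss.item() < best_loss:
        best_loss, best_x, stall = loss.item(), x.detach().clone(), 0
    else:
        stall += 1
    loss.backward()
    optimizer.step()
    if stall == patience or it == 10000:
        break
print(best_loss, best_x.item())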
def test(modelin=args.model,outfile=args.out,feature_transform=args.ft): # define model, dataloader, 3dmm eigenvectors, optimization method #if modelin != "": # model.load_state_dict(torch.load(modelin)) #model.eval() #model.cuda() # mean shape and eigenvectors for 3dmm M = 100 N = 68 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float() mu_lm[:,2] = mu_lm[:,2]*-1 le = torch.mean(mu_lm[36:42,:],axis=0) re = torch.mean(mu_lm[42:48,:],axis=0) ipd = torch.norm(le - re) lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_fpred = [] all_depth = [] out_shape = [] out_f = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i*100 for i in range(4,15)] # set random seed for reproducibility of test set for f_test in f_vals: # create dataloader loader = dataloader.TestLoader(f_test) f_pred = [] shape_pred = [] error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100; N = 68; batch_size = 1; for j, data in enumerate(loader): if j == 10: break # create bpnp camera calibration model calib_net= (1.1*torch.randn(1)).requires_grad_() # create bpnp sfm model sfm_net = torchvision.models.vgg11() sfm_net.classifier = torch.nn.Linear(25088,N*3) # load the data x_cam_gt = data['x_cam_gt'] shape_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] depth = torch.norm(x_cam_gt.mean(2),dim=1) all_depth.append(depth.numpy()) all_f.append(fgt.numpy()[0]) ptsI = x_img.reshape((M,N,2)).permute(0,2,1) x_img_pts = x_img.reshape((M,N,2)).permute(0,2,1) one = torch.ones(M*N,1) x_img_one = torch.cat([x_img,one],dim=1) x = x_img_one.permute(1,0) # run the model f = torch.sigmoid(calib_net)*2000 shape = mu_lm ini_pose = torch.zeros((M,6)) ini_pose[:,5] = 99 curloss = 100 # apply dual optimization shape,K,R,T = dualoptimization(x,ptsI,x2d,ini_pose,calib_net,sfm_net,shape_gt=shape_gt,fgt=fgt) f = K[0,0].detach() all_fpred.append(f.item()) # get errors km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K) Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K) # get errors reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K) reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean() rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt # save final prediction f_pred.append(f.detach().cpu().item()) shape_pred.append(shape.detach().cpu().numpy()) allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print(f"f/sequence: {f_test}/{j} | f/fgt: {f.item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}") #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) 
out_f.append(np.stack(f_pred)) out_shape.append(np.stack(shape_pred,axis=0)) #end for all_f = np.stack(all_f).flatten() all_fpred = np.stack(all_fpred).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} #matdata['shape'] = shape.detach().cpu().numpy() matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_fpred'] = np.array(all_fpred) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) matdata['shape'] = np.stack(out_shape) matdata['f'] = np.stack(out_f) scipy.io.savemat(outfile,matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
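# A minimal sketch of the result export used throughout these test functions:
# the collected arrays are gathered into a dict of numpy arrays and written to
# a .mat file with scipy.io.savemat so they can be inspected in MATLAB/Octave.
# The values and file name below are illustrative only.
import numpy as np
import scipy.io

matdata = {
    'fvals': np.array([400, 500, 600]),
    'seterror_relf': np.array([0.08, 0.05, 0.06]),
}
scipy.io.savemat('example_results.mat', matdata)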
def test(modelin=args.model, outfile=args.out, feature_transform=args.feat_trans):
    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [400 + i * 100 for i in range(4)]

    # set random seed for reproducibility of test set
    np.random.seed(0)
    for f_test in f_vals:
        f_test = 1400
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            # create a model and optimizer for it
            # (theta1 and the SGD optimizer are unused leftovers; the Adam optimizer
            #  defined below is the one actually used)
            theta1 = (1.1 * torch.randn(4)).requires_grad_()
            optimizer = torch.optim.SGD([theta1], lr=0.00001)
            model2 = Model1(k=199, feature_transform=False)
            model2.apply(util.init_weights)
            model = Model1(k=1, feature_transform=False)
            model.apply(util.init_weights)
            optimizer = torch.optim.Adam(list(model.parameters()) + list(model2.parameters()), lr=1)

            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']

            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            x2d = x_img.view((M, N, 2))
            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            one = torch.ones(M * N, 1)
            x_img_one = torch.cat([x_img, one], dim=1)
            x = x_img_one.permute(1, 0)

            ini_pose = torch.zeros((M, 6))
            ini_pose[:, 5] = 99
            pre_loss = 99
            for iter in itertools.count():
                optimizer.zero_grad()

                # shape prediction
                betas, _, _ = model2(x.unsqueeze(0))
                shape = torch.sum(betas * lm_eigenvec, 1)
                shape = shape.reshape(68, 3) + mu_lm
                #shape = shape_gt

                # RMSE between GT and predicted shape
                rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()

                # focal length prediction
                f, _, _ = model(x.unsqueeze(0))
                f = f + 300
                K = torch.zeros((3, 3)).float()
                K[0, 0] = f
                K[1, 1] = f
                K[2, 2] = 1

                # differentiable PnP pose estimation
                pose = bpnp(x2d, shape, K, ini_pose)
                pred = BPnP.batch_project(pose, shape, K)

                # loss
                #loss = torch.mean(torch.abs(pred - x2d))
                loss = torch.mean(torch.norm(pred - x2d, dim=2))

                loss.backward()
                optimizer.step()
                print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}")
                if iter == 200:
                    break
                ini_pose = pose.detach()

            # recover the final pose with EPnP + Gauss-Newton refinement
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K)

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K)
            reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T)
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            allerror_3d.append(reproj_error.data.numpy())
            allerror_2d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())
            print(f"f/sequence: {f_test}/{j} | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}")
        #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for
        break

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
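# A self-contained sketch of the reprojection loss used in the BPnP loop above:
# the mean Euclidean distance between projected and observed landmarks,
# averaged over frames and landmarks (shapes are M frames x N landmarks x 2).
# The inputs below are random stand-ins for BPnP.batch_project output and the
# observed 2D points.
import torch

def mean_reproj_error(pred, observed):
    return torch.mean(torch.norm(pred - observed, dim=2))

M, N = 100, 68
pred = torch.randn(M, N, 2)
observed = pred + 0.1 * torch.randn(M, N, 2)
print(mean_reproj_error(pred, observed).item())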
def test(modelin=args.model, outfile=args.out, feature_transform=args.feat_trans): # define model, dataloader, 3dmm eigenvectors, optimization method #if modelin != "": # model.load_state_dict(torch.load(modelin)) #model.eval() #model.cuda() # mean shape and eigenvectors for 3dmm M = 100 N = 68 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float() mu_lm[:, 2] = mu_lm[:, 2] * -1 shape = mu_lm.detach() lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_depth = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i * 100 for i in range(4, 21)] for f_test in f_vals: f_test = 1400 # create dataloader loader = dataloader.TestLoader(f_test) error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100 N = 68 batch_size = 1 for j, data in enumerate(loader): # create a model and optimizer for it model = Model2(k=1, feature_transform=False) model.apply(util.init_weights) optimizer = torch.optim.Adam(model.parameters(), lr=1e-1) M = loader.M N = loader.N # load the data T_gt = data['T_gt'] x_cam_gt = data['x_cam_gt'] x_w_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1) one = torch.ones(M * N, 1) x_img_one = torch.cat([x_img, one], dim=1) x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3) all_depth.append(np.mean(T_gt[:, 2])) all_f.append(fgt.numpy()[0]) # create the input b = 10 x = x_img_one.reshape(M, N, 3).reshape(b, M // b, N, 3).reshape(b, M // b * N, 3) x = x.permute(0, 2, 1) ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1) # optimize using EPNP+GN fvals = [] errors = [] for iter in itertools.count(): optimizer.zero_grad() f, _, _ = model(x) #f = f + 1000 f = torch.nn.functional.leaky_relu(f) + 300 K = torch.zeros((b, 3, 3)).float() K[:, 0, 0] = f.squeeze() K[:, 1, 1] = f.squeeze() K[:, 2, 2] = 1 # differentiable pose estimation losses = [] for i in range(b): j = i + 1 km, c_w, scaled_betas, alphas = util.EPnP( ptsI[i:j * b], shape, K[i]) Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI[i:j * b], K[i]) error2d = util.getReprojError2(ptsI[i:j * b], shape, R, T, K[i]).mean() losses.append(error2d) loss = torch.stack(losses).mean() loss.backward() optimizer.step() print( f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.mean().item():.1f}/{fgt[0].item():.1f}" ) if iter == 100: break # get overall poses f = f.mean() K = torch.zeros((3, 3)).float() K[0, 0] = f K[1, 1] = f K[2, 2] = 1 km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K) Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K) # get errors reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K) reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T) rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print( f"f/sequence: {f_test}/{j} | 
f/fgt: {f.item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}" ) #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) #end for break all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile, matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
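# A small sketch of the batched intrinsic matrices built in the chunked
# calibration loop above: one zero-principal-point pinhole matrix per chunk,
# with the predicted focal length on the diagonal. build_batched_K is a
# hypothetical helper name.
import torch

def build_batched_K(f):
    # f: (b,) predicted focal length per chunk
    b = f.shape[0]
    K = torch.zeros((b, 3, 3), dtype=f.dtype)
    K[:, 0, 0] = f
    K[:, 1, 1] = f
    K[:, 2, 2] = 1.0
    return K

print(build_batched_K(torch.tensor([400.0, 800.0, 1200.0])))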
def test(modelin=args.model, outfile=args.out, feature_transform=args.feat_trans): # define model, dataloader, 3dmm eigenvectors, optimization method #if modelin != "": # model.load_state_dict(torch.load(modelin)) #model.eval() #model.cuda() # mean shape and eigenvectors for 3dmm M = 100 N = 68 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float() mu_lm[:, 2] = mu_lm[:, 2] * -1 shape = mu_lm.detach() lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_depth = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [i * 100 for i in range(4, 21)] np.random.seed(0) for f_test in f_vals: f_test = 1200 # create dataloader loader = dataloader.TestLoader(f_test) error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100 N = 68 batch_size = 1 for j, data in enumerate(loader): # create a model and optimizer for it #model2 = Model1(k=199,feature_transform=False) #model2.apply(util.init_weights) model = Model1(k=1, feature_transform=False) model.apply(util.init_weights) optimizer = torch.optim.Adam(model.parameters(), lr=2e-1) #data = loader[67] x_cam_gt = data['x_cam_gt'] shape = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] T_gt = data['T_gt'] all_depth.append(np.mean(T_gt[:, 2])) all_f.append(fgt.numpy()[0]) x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1) one = torch.ones(M * N, 1) x_img_one = torch.cat([x_img, one], dim=1) x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3) x = x_img_one.permute(1, 0) ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1) for iter in itertools.count(): optimizer.zero_grad() #betas,_,_ = model2(x.unsqueeze(0)) #shape = torch.sum(betas * lm_eigenvec,1) #shape = shape.reshape(68,3) + mu_lm f, _, _ = model(x.unsqueeze(0)) #f = f + 300 #f = (torch.nn.functional.tanh(f)+1)*850 + 300 f = f + 300 #f = torch.nn.functional.sigmoid(f) K = torch.zeros((3, 3)).float() K[0, 0] = f K[1, 1] = f K[2, 2] = 1 # differentiable pose estimation km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K) Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K) error2d = util.getReprojError2(ptsI, shape, R, T, K, show=False, loss='l1') loss = error2d.mean() loss.backward() if torch.any(model.fc2.weight.grad != model.fc2.weight.grad): print("oh oh something broke") break optimizer.step() print( f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f}" ) if iter == 200: break # get errors reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K) reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T) rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print( f"f/sequence: {f_test}/{j} | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: 
{reproj_error.item():.4f}" ) #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) #end for break all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile, matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
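# A self-contained sketch of the NaN-gradient guard used in the loop above
# (the grad != grad comparison is the classic NaN test); torch.isnan makes the
# intent explicit and checks every parameter rather than a single layer.
# has_nan_grad is a hypothetical helper name.
import torch

def has_nan_grad(model):
    return any(p.grad is not None and torch.isnan(p.grad).any() for p in model.parameters())

model = torch.nn.Linear(4, 1)
model(torch.randn(2, 4)).sum().backward()
if has_nan_grad(model):
    print("oh oh something broke")  # skip the update, as in the loop above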
def test(model, modelin=args.model, outfile=args.out, feature_transform=args.feat_trans):
    # define model, dataloader, 3dmm eigenvectors, optimization method
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().cuda()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().cuda()

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    allerror_f = []
    allerror_d = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i * 100 for i in range(4, 21)]
    for f_test in f_vals:
        # create dataloader
        data = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        for k in range(len(data)):
            batch = data[k]
            x_cam_gt = batch['x_cam_gt'].cuda()
            x_w_gt = batch['x_w_gt'].cuda()
            f_gt = batch['f_gt'].cuda()
            x_img = batch['x_img'].cuda()
            x_img_gt = batch['x_img_gt'].cuda()
            T_gt = batch['T_gt']
            allerror_d.append(T_gt[:, 2])

            one = torch.ones(M, 1, 68).cuda()
            x_img_one = torch.cat([x_img, one], dim=1)

            # run the model
            out, trans, transfeat = model(x_img_one)
            alphas = out[:, :199].mean(0)
            f = torch.relu(out[:, 199]).mean()

            # intrinsics from the predicted focal length
            # (leftover focal-length sweep kept commented out)
            #for f in np.linspace(-200, 200, 100):
            K = torch.zeros((3, 3)).float().cuda()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1
            K[0, 2] = 320
            K[1, 2] = 240

            # apply 3DMM model from predicted parameters
            alpha_matrix = torch.diag(alphas)
            shape_cov = torch.mm(lm_eigenvec, alpha_matrix)
            s = shape_cov.sum(1).view(68, 3)
            #shape = (mu_lm + s)
            shape = mu_lm
            shape[:, 2] = shape[:, 2] * -1

            # run epnp algorithm
            # get control points
            c_w = util.getControlPoints(shape)

            # solve alphas
            alphas = util.solveAlphas(shape, c_w)

            # setup M
            px = 320
            py = 240
            Matrix = util.setupM(alphas, x_img.permute(0, 2, 1), px, py, f)

            # get eigenvectors of M for each view
            u, d, v = torch.svd(Matrix)

            # solve N=1
            c_c_n1 = v[:, :, -1].reshape((100, 4, 3)).permute(0, 2, 1)
            _, x_c_n1, _ = util.scaleControlPoints(c_c_n1, c_w[:3, :], alphas, shape)
            Rn1, Tn1 = util.getExtrinsics(x_c_n1, shape)
            reproj_error2_n1 = util.getReprojError2(x_img, shape, Rn1, Tn1, K)
            reproj_error3_n1 = util.getReprojError3(x_cam_gt, shape, Rn1, Tn1)
            rel_error_n1 = util.getRelReprojError3(x_cam_gt, shape, Rn1, Tn1)

            # solve N=2
            # get distance constraints
            d12, d13, d14, d23, d24, d34 = util.getDistances(c_w)
            distances = torch.stack([d12, d13, d14, d23, d24, d34])**2
            beta_n2 = util.getBetaN2(v[:, :, -2:], distances)
            c_c_n2 = util.getControlPointsN2(v[:, :, -2:], beta_n2)
            _, x_c_n2, _ = util.scaleControlPoints(c_c_n2, c_w[:3, :], alphas, shape)
            Rn2, Tn2 = util.getExtrinsics(x_c_n2, shape)
            reproj_error2_n2 = util.getReprojError2(x_img, shape, Rn2, Tn2, K)
            reproj_error3_n2 = util.getReprojError3(x_cam_gt, shape, Rn2, Tn2)
            rel_error_n2 = util.getRelReprojError3(x_cam_gt, shape, Rn2, Tn2)

            # per view, keep whichever of the N=1/N=2 solutions reprojects better
            mask = reproj_error2_n1 < reproj_error2_n2
            reproj_errors = torch.cat((reproj_error2_n1[mask], reproj_error2_n2[~mask]))
            rmse_errors = torch.cat((reproj_error3_n1[mask], reproj_error3_n2[~mask]))
            rel_errors = torch.cat((rel_error_n1[mask], rel_error_n2[~mask]))

            # debugging early exit left in the original; code below it is unreachable
            print(rel_errors.mean())
            quit()

            # errors
            allerror_3d.append(reproj_errors.cpu().data.numpy())
            allerror_2d.append(rmse_errors.cpu().data.numpy())
            allerror_rel3d.append(rel_errors.cpu().data.numpy())

            reproj_error = torch.mean(reproj_errors)
            reconstruction_error = torch.mean(rmse_errors)
            rel_error = torch.mean(rel_errors)
            f_error = torch.abs(f_gt - f) / f_gt

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())
            print(f"f/sequence: {f_test}/{k} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}")
        #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
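# A self-contained sketch of the per-view selection at the end of the EPnP
# block above: whichever of the N=1 / N=2 candidate solutions has the lower 2D
# reprojection error is kept for each view, and the chosen errors are then
# averaged. torch.where keeps the errors aligned with their views (the
# cat(mask / ~mask) form above reorders them). The numbers are illustrative.
import torch

err_n1 = torch.tensor([1.2, 0.4, 0.9])  # per-view reprojection error, N=1 solution
err_n2 = torch.tensor([0.8, 0.7, 1.5])  # per-view reprojection error, N=2 solution
mask = err_n1 < err_n2                  # True where the N=1 solution wins
chosen = torch.where(mask, err_n1, err_n2)
print(chosen, chosen.mean().item())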
def test(modelin=args.model, outfile=args.out, feature_transform=args.feat_trans): # define model, dataloader, 3dmm eigenvectors, optimization method #if modelin != "": # model.load_state_dict(torch.load(modelin)) #model.eval() #model.cuda() # mean shape and eigenvectors for 3dmm M = 100 N = 68 data3dmm = dataloader.SyntheticLoader() mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach() mu_lm[:, 2] = mu_lm[:, 2] * -1 lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float() #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2) # sample from f testing set allerror_2d = [] allerror_3d = [] allerror_rel3d = [] allerror_relf = [] all_f = [] all_depth = [] seterror_3d = [] seterror_rel3d = [] seterror_relf = [] seterror_2d = [] f_vals = [400 + i * 100 for i in range(4)] # set random seed for reproducibility of test set np.random.seed(0) torch.manual_seed(0) for f_test in f_vals: f_test = 1400 # create dataloader loader = dataloader.TestLoader(f_test) error_2d = [] error_3d = [] error_rel3d = [] error_relf = [] M = 100 N = 68 batch_size = 1 for j, data in enumerate(loader): # create a model and optimizer for it model2 = Model1(k=199, feature_transform=False) model2.apply(util.init_weights) model = Model1(k=1, feature_transform=False) model.apply(util.init_weights) opt1 = torch.optim.Adam(model2.parameters(), lr=1e-1) opt2 = torch.optim.Adam(model.parameters(), lr=1e-1) # load the data x_cam_gt = data['x_cam_gt'] shape_gt = data['x_w_gt'] fgt = data['f_gt'] x_img = data['x_img'] x_img_gt = data['x_img_gt'] T_gt = data['T_gt'] all_depth.append(np.mean(T_gt[:, 2])) all_f.append(fgt.numpy()[0]) x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1) one = torch.ones(M * N, 1) x_img_one = torch.cat([x_img, one], dim=1) x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3) x = x_img_one.permute(1, 0) ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1) # multi objective optimization shape = mu_lm for outerloop in itertools.count(): # calibration alg3 shape = shape.detach() for iter2 in itertools.count(): opt2.zero_grad() # focal length prediction curf, _, _ = model(x.unsqueeze(0)) curf = curf + 300 K = torch.zeros((3, 3)).float() K[0, 0] = curf K[1, 1] = curf K[2, 2] = 1 # RMSE between GT and predicted shape rmse = torch.norm(shape_gt - shape, dim=1).mean().detach() # differentiable PnP pose estimation km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K) Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K) error2d = util.getReprojError2(ptsI, shape, R, T, K, show=False, loss='l2') loss = error2d.mean() if iter2 > 20 and prev_loss < loss: break else: prev_loss = loss loss.backward() opt2.step() print( f"iter: {iter2} | error: {loss.item():.3f} | f/fgt: {curf.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}" ) # sfm alg2 curf = curf.detach() for iter1 in itertools.count(): opt1.zero_grad() # shape prediction betas, _, _ = model2(x.unsqueeze(0)) shape = torch.sum(betas * lm_eigenvec, 1) shape = shape.reshape(68, 3) + mu_lm K = torch.zeros((3, 3)).float() K[0, 0] = curf K[1, 1] = curf K[2, 2] = 1 # RMSE between GT and predicted shape rmse = torch.norm(shape_gt - shape, dim=1).mean().detach() # differentiable PnP pose estimation km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K) Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape, ptsI, K) error2d = util.getReprojError2(ptsI, shape, R, T, K, show=False, loss='l2') loss = error2d.mean() if iter1 > 20 and prev_loss < loss: break else: prev_loss = loss loss.backward() opt1.step() print( 
f"iter: {iter1} | error: {loss.item():.3f} | f/fgt: {curf.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}" ) # closing condition for outerloop on dual objective if outerloop == 4: break f = curf # get errors reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K) reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T) rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T) reproj_error = reproj_errors2.mean() reconstruction_error = reproj_errors3.mean() rel_error = rel_errors.mean() f_error = torch.abs(fgt - f) / fgt allerror_3d.append(reproj_error.data.numpy()) allerror_2d.append(reconstruction_error.data.numpy()) allerror_rel3d.append(rel_error.data.numpy()) error_2d.append(reproj_error.cpu().data.item()) error_3d.append(reconstruction_error.cpu().data.item()) error_rel3d.append(rel_error.cpu().data.item()) error_relf.append(f_error.cpu().data.item()) print( f"f/sequence: {f_test}/{j} | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} | f_error_rel: {f_error.item():.4f} | rmse: {reconstruction_error.item():.4f} | rel rmse: {rel_error.item():.4f} | 2d error: {reproj_error.item():.4f}" ) #end for avg_2d = np.mean(error_2d) avg_rel3d = np.mean(error_rel3d) avg_3d = np.mean(error_3d) avg_relf = np.mean(error_relf) seterror_2d.append(avg_2d) seterror_3d.append(avg_3d) seterror_rel3d.append(avg_rel3d) seterror_relf.append(avg_relf) #end for break all_f = np.stack(all_f).flatten() all_d = np.stack(all_depth).flatten() allerror_2d = np.stack(allerror_2d).flatten() allerror_3d = np.stack(allerror_3d).flatten() allerror_rel3d = np.stack(allerror_rel3d).flatten() matdata = {} matdata['fvals'] = np.array(f_vals) matdata['all_f'] = np.array(all_f) matdata['all_d'] = np.array(all_depth) matdata['error_2d'] = allerror_2d matdata['error_3d'] = allerror_3d matdata['error_rel3d'] = allerror_rel3d matdata['seterror_2d'] = np.array(seterror_2d) matdata['seterror_3d'] = np.array(seterror_3d) matdata['seterror_rel3d'] = np.array(seterror_rel3d) matdata['seterror_relf'] = np.array(seterror_relf) scipy.io.savemat(outfile, matdata) print(f"MEAN seterror_2d: {np.mean(seterror_2d)}") print(f"MEAN seterror_3d: {np.mean(seterror_3d)}") print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}") print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")