def get_scores(checkpoint_dir):
    """Evaluate one checkpoint: print ATE and global point-cloud error.

    Loads the training options saved in ``<checkpoint_dir>/opt.json``,
    rebuilds the AVD dataset, loads the predicted poses / global map from
    the checkpoint, and prints the absolute trajectory error (ATE) and the
    point-cloud difference against ground truth.

    Args:
        checkpoint_dir: path to a results folder containing ``opt.json``,
            ``pose_est.npy`` and ``obs_global_est.npy``.
    """
    saved_json_file = os.path.join(checkpoint_dir, 'opt.json')
    train_opt = utils.load_opt_from_json(saved_json_file)
    name = train_opt['name']
    # Remap the data_dir stored at training time onto the local dataset root
    # (only the trailing scene-folder name is reused).
    data_dir = '../../../data/ActiveVisionDataset/' + train_opt['data_dir'].split('/')[-1]
    subsample_rate = train_opt['subsample_rate']
    traj = train_opt['traj']
    print(data_dir)

    # Load ground-truth poses and the per-frame local point clouds.
    dataset = AVD(data_dir, traj, subsample_rate)
    gt_pose = dataset.gt
    gt_location = gt_pose[:, :3]
    pts = dataset.point_clouds

    # Load predicted poses.
    # FIX: use the `checkpoint_dir` argument rather than the module-level
    # `opt.checkpoint_dir` — the original ignored its own parameter, so the
    # function only ever evaluated the checkpoint named on the command line.
    pred_file = os.path.join(checkpoint_dir, 'pose_est.npy')
    pred_pose = np.load(pred_file)
    pred_location = pred_pose[:, :2] * dataset.depth_scale  # denormalization
    pred_location = add_y_coord_for_evaluation(pred_location)
    print(pred_pose)

    # Absolute trajectory error against ground-truth locations.
    ate, aligned_location = utils.compute_ate(pred_location, gt_location)
    print('{}, ate: {}'.format(name, ate))

    # Normalize GT translations into the network's coordinate scale and
    # recover yaw from the rotation components (columns 3 and 5).
    gt_pose[:, :3] = gt_pose[:, :3] / dataset.depth_scale
    gt_yaw = np.arccos(
        gt_pose[:, 5] / np.sqrt(gt_pose[:, 3] * gt_pose[:, 3] + gt_pose[:, 5] * gt_pose[:, 5]))
    # (x, z, -yaw) pose parameterization used by transform_to_global_AVD.
    gt_pose_xzth = np.vstack((gt_pose[:, 0], gt_pose[:, 2], -gt_yaw)).transpose()

    # Fixed 16-entry color palette for visualizing per-frame point clouds.
    colors = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 1], [1, 0, 1],
                       [0.5, 0.5, 0.5], [0, 0.5, 0], [0, 1, 0], [0.5, 0, 0],
                       [0, 0, 0.5], [0.5, 0.5, 0], [0.5, 0, 0.5], [1, 0, 0],
                       [0.75, 0.75, 0.75], [0, 0.5, 0.5], [1, 1, 1], [1, 1, 0]])

    # FIX: same parameter-vs-global bug as above.
    global_point_cloud_file = os.path.join(checkpoint_dir, 'obs_global_est.npy')
    pcds_ours = utils.load_obs_global_est(global_point_cloud_file, colors)

    pts_gt = transform_to_global_AVD(gt_pose_xzth, pts).numpy()
    pts_ours = np.load(global_point_cloud_file)
    # NOTE(review): 16 frames hard-coded (matches the palette size) —
    # TODO confirm this equals len(dataset) for every trajectory.
    pts_gt = pts_gt.reshape((16, -1, 3))
    print(pt_diff(pts_gt, pts_ours))
    # FIX: removed `print(pt_diff(pts_gt, pts_or))` — `pts_or` was never
    # assigned (its loading code was commented out), so the original
    # function always ended with a NameError.
# --- training-script setup (fragment: `parser` is created earlier in the
# --- file, and the training loop presumably continues after this chunk) ---
parser.add_argument('--log_interval', type=int, default=10, help='logging interval of saving results')
opt = parser.parse_args()

# Results go under ../results/AVD/<name>; the parsed options are saved
# alongside them so evaluation scripts can reload them from opt.json.
checkpoint_dir = os.path.join('../results/AVD', opt.name)
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
utils.save_opt(checkpoint_dir, opt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print('loading dataset')
# shuffle=False: frame order matters for trajectory/pose estimation.
dataset = AVD(opt.data_dir, opt.traj, opt.subsample_rate)
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)
# NOTE(review): eval() on a CLI string — fine for a research script, but it
# executes arbitrary code; a getattr(loss, opt.loss) lookup would be safer.
loss_fn = eval('loss.' + opt.loss)

print('creating model')
model = DeepMapping_AVD(loss_fn=loss_fn, n_samples=opt.n_samples).to(device)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)

# One learnable latent vector per frame, Xavier-initialized.
# `latent_size` is defined earlier in the file — not visible in this chunk.
# `mask_vecs_pair` is presumably filled further down — verify against the
# rest of the file.
latent_vecs = []
mask_vecs_pair = []
for i in range(len(dataset)):
    vec = tini.xavier_normal_(torch.ones(latent_size)).to(device)
    vec = torch.nn.Parameter(vec)  # requires_grad defaults to True
    latent_vecs.append(vec)
y = np.zeros_like(x) z = pred_pos_DM[:,1] return np.stack((x,y,z),axis=-1) parser = argparse.ArgumentParser() parser.add_argument('-c','--checkpoint_dir',type=str,required=True,help='path to results folder') opt = parser.parse_args() saved_json_file = os.path.join(opt.checkpoint_dir,'opt.json') train_opt = utils.load_opt_from_json(saved_json_file) name = train_opt['name'] data_dir = train_opt['data_dir'] subsample_rate = train_opt['subsample_rate'] traj = train_opt['traj'] # load ground truth poses dataset = AVD(data_dir,traj,subsample_rate) gt_pose = dataset.gt gt_location = gt_pose[:,:3] # load predicted poses pred_file = os.path.join(opt.checkpoint_dir,opt.loss+'_pose_est.npy') pred_pose = np.load(pred_file) pred_location = pred_pose[:,:2] * dataset.depth_scale # denormalization pred_location = add_y_coord_for_evaluation(pred_location) # compute absolute trajectory error (ATE) ate,aligned_location = utils.compute_ate(pred_location,gt_location) print('{}, ate: {}'.format(name,ate)) # vis results global_point_cloud_file = os.path.join(opt.checkpoint_dir,opt.loss+'_obs_global_est.npy')