def alignment_to_ground_truth(pc_pred, quat_pred, gt_pred, quat_gt):
    """
    Computes rotation that aligns predicted point cloud with the ground truth one.

    Basic idea is that quat_pred * pc_pred = quat_gt * gt_pred, and hence the
    desired alignment can be computed as quat_gt^(-1) * quat_pred. This alignment
    is then used as an initialization to ICP, which further refines it.

    :param pc_pred: predicted point cloud (torch tensor; converted to numpy below)
    :param quat_pred: predicted camera rotation, reshaped to (1, 4)
    :param gt_pred: ground truth point cloud
    :param quat_gt: ground truth camera rotation, reshaped to (1, 4)
    :return: tuple (alignment quaternion with leading batch dim, ICP inlier RMSE)
    """
    # NOTE: quat2mat / mat2quat were imported here but never used; removed.
    from util.quaternion import quaternion_normalise, quaternion_conjugate, \
        quaternion_multiply, from_rotation_matrix, as_rotation_matrix

    quat_gt = quat_gt.reshape(1, 4)
    quat_pred = quat_pred.reshape(1, 4)

    # Normalise both quaternions so that conjugate == inverse below.
    quat_pred_norm = quaternion_normalise(quat_pred)
    quat_gt_norm = quaternion_normalise(quat_gt)
    # quat_gt^(-1) * quat_pred — the analytic alignment estimate.
    quat_unrot = quaternion_multiply(quaternion_conjugate(quat_gt_norm), quat_pred_norm)

    init_rotation_np = np.squeeze(as_rotation_matrix(quat_unrot))
    # Refine the analytic initialization with point-to-point ICP.
    reg_p2p = open3d_icp(pc_pred.cpu().numpy(), gt_pred, init_rotation_np)
    T = np.array(reg_p2p.transformation)
    rot_matrix = T[:3, :3]
    # Sanity check: ICP must return a proper rotation (det == 1, no reflection/scale).
    assert (np.fabs(np.linalg.det(rot_matrix) - 1.0) <= 0.0001)
    rot_matrix = np.expand_dims(rot_matrix, 0)
    quat = from_rotation_matrix(rot_matrix)
    return quat, reg_p2p.inlier_rmse
def model_unrotate_points(cfg):
    """
    Builds a TF graph that removes the predicted camera rotation relative to
    the ground-truth one:

        un_q = quat_gt^(-1) * predicted_quat
        pc_unrot = un_q * pc_np * un_q^(-1)

    :param cfg: app config (currently unused; kept for interface compatibility)
    :return: tuple of graph tensors (input_pc, pred_quat, gt_quat, pc_unrot) —
             the first three are placeholders to feed, the last is the output.
    """
    from util.quaternion import quaternion_normalise, quaternion_conjugate, \
        quaternion_rotate, quaternion_multiply

    # BUG FIX: input_pc and pred_quat were referenced and returned but never
    # defined, so this function raised NameError. Define them as placeholders,
    # symmetric with gt_quat. Point count is left dynamic (None) since the
    # exact cloud size is not known here.
    input_pc = tf.placeholder(dtype=tf.float32, shape=[1, None, 3])
    pred_quat = tf.placeholder(dtype=tf.float32, shape=[1, 4])
    gt_quat = tf.placeholder(dtype=tf.float32, shape=[1, 4])

    pred_quat_n = quaternion_normalise(pred_quat)
    gt_quat_n = quaternion_normalise(gt_quat)
    # Relative rotation from predicted to ground-truth frame.
    un_q = quaternion_multiply(quaternion_conjugate(gt_quat_n), pred_quat_n)
    pc_unrot = quaternion_rotate(input_pc, un_q)
    return input_pc, pred_quat, gt_quat, pc_unrot
def run_eval():
    """
    Evaluates predicted camera poses against ground truth over all models/views.

    Loads per-model predicted camera quaternions from saved .mat files, removes
    a precomputed global reference rotation, and measures the angular error to
    the ground-truth camera quaternion for every view. Reports pose accuracy
    (fraction of errors below cfg.pose_accuracy_threshold degrees) and the
    median angular error, saving both to .mat and .txt files in the experiment
    directory.
    """
    config = tf.ConfigProto(
        device_count={'GPU': 1}
    )
    cfg = app_config

    exp_dir = cfg.checkpoint_dir
    num_views = cfg.num_views

    # Small helper graph: quaternion conjugate and product, fed via placeholders.
    g = tf.Graph()
    with g.as_default():
        quat_inp = tf.placeholder(dtype=tf.float64, shape=[1, 4])
        quat_inp_2 = tf.placeholder(dtype=tf.float64, shape=[1, 4])
        quat_conj = quaternion_conjugate(quat_inp)
        quat_mul = quaternion_multiply(quat_inp, quat_inp_2)

    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    save_pred_name = "{}_{}".format(cfg.save_predictions_dir, cfg.eval_split)
    save_dir = os.path.join(exp_dir, cfg.save_predictions_dir)

    reference_rotation = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir))["rotation"]
    # Conjugate of the reference rotation, used to align every prediction.
    ref_conj_np = sess.run(quat_conj, feed_dict={quat_inp: reference_rotation})

    dataset = Dataset3D(cfg)

    num_models = dataset.num_samples()

    model_names = []
    angle_error = np.zeros((num_models, num_views), dtype=np.float64)

    for model_idx in range(num_models):
        sample = dataset.data[model_idx]

        print("{}/{}".format(model_idx, num_models))
        print(sample.name)

        model_names.append(sample.name)

        # scipy.io.loadmat appends the .mat extension automatically.
        mat_filename = "{}/{}_pc".format(save_dir, sample.name)
        data = scipy.io.loadmat(mat_filename)
        all_cameras = data["camera_pose"]

        for view_idx in range(num_views):
            cam_pos = sample.cam_pos[view_idx, :]
            gt_quat_np = quaternion_from_campos(cam_pos)
            gt_quat_np = np.expand_dims(gt_quat_np, 0)

            pred_quat_np = all_cameras[view_idx, :]
            pred_quat_np /= np.linalg.norm(pred_quat_np)
            pred_quat_np = np.expand_dims(pred_quat_np, 0)

            # Undo the global reference rotation of the predicted pose.
            pred_quat_aligned_np = sess.run(quat_mul, feed_dict={
                quat_inp: pred_quat_np,
                quat_inp_2: ref_conj_np
            })

            q1 = gt_quat_np
            q2 = pred_quat_aligned_np

            # Relative rotation q1^(-1) * q2; its scalar (w) component encodes
            # the rotation angle: w = cos(angle / 2).
            q1_conj = sess.run(quat_conj, feed_dict={quat_inp: q1})
            q_diff = sess.run(quat_mul, feed_dict={quat_inp: q1_conj, quat_inp_2: q2})
            # BUG FIX: clamp to [-1, 1] — floating-point round-off in the
            # quaternion product can push |w| marginally above 1.0, which makes
            # np.arccos return NaN and silently corrupts the statistics.
            ang_diff = 2 * np.arccos(np.clip(q_diff[0, 0], -1.0, 1.0))
            # Wrap into (-pi, pi] so the error magnitude is the minimal angle.
            if ang_diff > np.pi:
                ang_diff -= 2 * np.pi

            angle_error[model_idx, view_idx] = np.fabs(ang_diff)

    all_errors = np.reshape(angle_error, (-1))
    angle_thresh_rad = cfg.pose_accuracy_threshold / 180.0 * np.pi
    correct = all_errors < angle_thresh_rad
    num_predictions = correct.shape[0]
    accuracy = np.count_nonzero(correct) / num_predictions
    median_error = np.sort(all_errors)[num_predictions // 2]
    median_error = median_error / np.pi * 180
    print("accuracy:", accuracy, "median angular error:", median_error)

    scipy.io.savemat(os.path.join(exp_dir, "pose_error_{}.mat".format(save_pred_name)),
                     {"angle_error": angle_error,
                      "accuracy": accuracy,
                      "median_error": median_error})

    # Use a context manager so the file handle is closed even on error.
    with open(os.path.join(exp_dir, "pose_error_{}.txt".format(save_pred_name)), "w") as f:
        f.write("{} {}\n".format(accuracy, median_error))