def print_trans_rot_errors(gts, obj_id, ts_est, ts_est_old, Rs_est, Rs_est_old):
    """Print translation/rotation errors of the first estimated pose against
    the ground-truth instance of ``obj_id`` with the closest translation.

    Args:
        gts: list of gt dicts with keys "obj_id", "cam_t_m2c", "cam_R_m2c".
        obj_id: object id to match against the gts.
        ts_est, Rs_est: refined translations/rotations (first entry is used).
        ts_est_old, Rs_est_old: pre-refinement translations/rotations.

    Returns:
        (t_err, R_err): translation error vector of the refined estimate vs.
        the matched gt, and rotation error from ``pose_error.re``; ``R_err``
        is None if it could not be computed.
    """
    t_errs = []
    obj_gts = []
    for gt in gts:
        if gt["obj_id"] == obj_id:
            t_errs.append(ts_est[0] - gt["cam_t_m2c"].squeeze())
            obj_gts.append(gt)
    # Match the gt instance whose translation is closest to the estimate.
    min_t_err_idx = np.argmin(np.linalg.norm(np.array(t_errs), axis=1))
    print(min_t_err_idx)
    print(np.array(t_errs).shape)
    print(len(obj_gts))
    gt = obj_gts[min_t_err_idx].copy()
    # fix: R_err was unbound at `return` whenever the try-block raised before
    # assigning it, turning the swallowed error into a NameError.
    R_err = None
    try:
        print("Translation Error before refinement")
        print(ts_est_old[0] - gt["cam_t_m2c"].squeeze())
        print("Translation Error after refinement")
        print(t_errs[min_t_err_idx])
        print("Rotation Error before refinement")
        print(pose_error.re(Rs_est_old[0], gt["cam_R_m2c"]))
        print("Rotation Error after refinement")
        R_err = pose_error.re(Rs_est[0], gt["cam_R_m2c"])
        print(R_err)
    except Exception as exc:
        # fix: bare `except: pass` also swallowed SystemExit/KeyboardInterrupt
        # and hid the failure entirely; keep best-effort behavior but log it.
        print("failed to compute pose errors: {}".format(exc))
    return (t_errs[min_t_err_idx], R_err)
def get_closest_rot(rot_est, rot_gt, sym_info):
    """get the closest rot_gt given rot_est and sym_info.
    rot_est: ndarray
    rot_gt: ndarray
    sym_info: None or Kx3x3 ndarray, m2m
    """
    if sym_info is None:
        return rot_gt
    if isinstance(sym_info, torch.Tensor):
        sym_info = sym_info.cpu().numpy()
    if sym_info.ndim == 2:
        # A single 3x3 symmetry matrix: promote to a batch of one.
        sym_info = sym_info.reshape((1, 3, 3))
    # Evaluate every symmetry-equivalent gt rotation and keep the one with
    # the smallest rotation error w.r.t. the estimate.
    best_rot = rot_gt
    best_err = re(rot_est, rot_gt)
    for sym_R in sym_info:
        # R_gt_m2c x R_sym_m2m ==> R_gt_sym_m2c
        candidate = rot_gt.dot(sym_R)
        cand_err = re(rot_est, candidate)
        if cand_err < best_err:
            best_err = cand_err
            best_rot = candidate
    return best_rot
def get_closest_pose(est_rot, gt_rot, sym_info):
    """Return the symmetry-equivalent of ``gt_rot`` closest to ``est_rot``.

    Args:
        est_rot: 3x3 estimated rotation.
        gt_rot: 3x3 ground-truth rotation.
        sym_info: length-4 array-like, (sym_axis, sym_angle) = [0:3, 3].
            sym_angle == -1: no symmetry, gt_rot is returned unchanged.
            sym_angle == 0: continuous symmetry about sym_axis; the closest
                rotation is found by bisecting the angle down to 0.1 deg.
            otherwise: discrete symmetry with period sym_angle degrees
                (must divide 180).

    Returns:
        3x3 rotation matrix: symmetry-equivalent gt closest to est_rot.
    """
    # sym_info: (sym_axis, sym_angle) [0:3, 3]
    def gen_mat(axis, degree):
        # Rotation matrix for `degree` degrees about `axis` (unit-normalized),
        # built from the half-angle quaternion.
        axis = axis / LA.norm(axis)
        return quat2mat(
            [
                cos(degree / 360.0 * pi),
                axis[0] * sin(degree / 360.0 * pi),
                axis[1] * sin(degree / 360.0 * pi),
                axis[2] * sin(degree / 360.0 * pi),
            ]
        )

    sym_angle = int(sym_info[3])
    sym_axis = np.copy(sym_info[:3])
    if sym_angle == -1:
        # No symmetry.
        closest_rot = gt_rot
    elif sym_angle == 0:
        # Continuous symmetry: bracket the best angle, then bisect.
        angle = 180.0
        gt_rot_1 = np.copy(gt_rot)
        rd_1 = re(gt_rot_1, est_rot)
        gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, angle))
        rd_2 = re(gt_rot_2, est_rot)
        if rd_1 < rd_2:
            gt_rot_1 = np.matmul(gt_rot, gen_mat(sym_axis, -90))
            gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, 90))
        else:
            gt_rot_1 = np.matmul(gt_rot, gen_mat(sym_axis, 90))
            gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, 270))
        rd_1 = re(gt_rot_1, est_rot)
        rd_2 = re(gt_rot_2, est_rot)
        count = 1
        thresh = 0.1
        while angle > thresh:
            angle /= 2
            count += 1
            # Shrink the bracket toward whichever endpoint is closer.
            if rd_1 < rd_2:
                gt_rot_2 = np.matmul(gt_rot_2, gen_mat(sym_axis, -angle))
                rd_2 = re(gt_rot_2, est_rot)
            else:
                gt_rot_1 = np.matmul(gt_rot_1, gen_mat(sym_axis, angle))
                rd_1 = re(gt_rot_1, est_rot)
        # print("rd_1: {}, rd_2: {}, angle: {}, count: {}".format(rd_1, rd_2, angle, count))
        closest_rot = gt_rot_1 if rd_1 < rd_2 else gt_rot_2
    else:
        assert 180 % sym_angle == 0
        rot_delta = gen_mat(sym_axis, sym_angle)
        cur_rot = np.copy(gt_rot)
        closest_rot = np.copy(cur_rot)
        closest_angle = re(cur_rot, est_rot)
        # fix: `180 / sym_angle` is a float on Python 3 and range() raises
        # TypeError; use integer division (exact thanks to the assert above).
        for i in range(180 // sym_angle):
            cur_rot = np.matmul(cur_rot, rot_delta)
            rd = re(cur_rot, est_rot)
            if rd < closest_angle:
                closest_rot = np.copy(cur_rot)
                closest_angle = rd
    return closest_rot
def compute_mean_re_te(pred_transes, pred_rots, gt_transes, gt_rots):
    """Compute batch-mean rotation and translation errors.

    Args:
        pred_transes, gt_transes: (bs, 3) torch tensors of translations.
        pred_rots, gt_rots: (bs, 3, 3) torch tensors of rotations.

    Returns:
        (mean rotation error, mean translation error) computed with the
        `re`/`te` error functions over the batch.
    """
    pred_transes = pred_transes.detach().cpu().numpy()
    pred_rots = pred_rots.detach().cpu().numpy()
    gt_transes = gt_transes.detach().cpu().numpy()
    gt_rots = gt_rots.detach().cpu().numpy()

    batch_size = pred_rots.shape[0]
    rot_errors = np.array(
        [re(pred_rots[k], gt_rots[k]) for k in range(batch_size)],
        dtype=np.float32,
    )
    trans_errors = np.array(
        [te(pred_transes[k], gt_transes[k]) for k in range(batch_size)],
        dtype=np.float32,
    )
    return rot_errors.mean(), trans_errors.mean()
# --- Sanity check / micro-benchmark for get_closest_rot (top-level script) ---
# NOTE(review): relies on module-level names (models_info, cls_idx, obj_id,
# renderer, time, re, axangle2mat, mat2axangle, get_symmetry_transformations)
# defined elsewhere in this file — confirm before running standalone.
trans = np.array([-0.0021458883, 0.0804758, 0.78142926])
axis_est = np.array([1, 2, 0])
axis_gt = np.array([0, 2, 1])
est_rot = axangle2mat(axis_est, -pi / 3)
gt_rot = axangle2mat(axis_gt, pi)
# Symmetry transformations of the model (continuous symmetries discretized
# with max_sym_disc_step).
transforms_sym = get_symmetry_transformations(models_info[cls_idx], max_sym_disc_step=0.01)
# sym_info = axangle2mat([0, 0, 1], pi)
sym_info = np.array([sym["R"] for sym in transforms_sym])
print("sym_info", sym_info.shape)
# Only the rotation part of these 3x4 poses is meaningful; the translation
# columns are random filler.
est_pose = np.random.rand(3, 4)
est_pose[:, :3] = est_rot
gt_pose = np.random.rand(3, 4)
gt_pose[:, :3] = gt_rot
rd_ori = re(est_rot, gt_rot)
# Time get_closest_rot over 3000 repeats and report the per-call average.
t = time.perf_counter()
for i in range(3000):
    closest_rot = get_closest_rot(est_rot, gt_rot, sym_info)
print(("calculate closest rot {}s".format((time.perf_counter() - t) / 3000)))
closest_pose = np.copy(gt_pose)
closest_pose[:, :3] = closest_rot
rd_closest = re(est_rot, closest_pose[:, :3])
print(("rot_est: {}, rot_gt: {}, closest rot_gt: {}".format(
    mat2axangle(est_rot), mat2axangle(gt_rot), mat2axangle(closest_rot))))
print(("original rot dist: {}, closest rot dist: {}".format(rd_ori, rd_closest)))
# Render the estimated and gt poses for visual comparison.
est_img, _ = renderer.render(obj_id, est_rot, trans)
gt_img, _ = renderer.render(obj_id, gt_rot, trans)
def _eval_predictions_precision(self):
    """NOTE: eval precision instead of recall
    Evaluate self._predictions on 6d pose.
    Return results with the metrics of the tasks.

    Per object: computes ad/re/te/proj pose errors against the gts, derives
    threshold precisions (undetected images are ignored rather than counted
    as failures — hence "precision"), logs a summary table, and dumps the
    raw errors and precisions as pickles into the output dir.
    Returns an empty dict.
    """
    self._logger.info("Eval results ...")
    cfg = self.cfg
    method_name = f"{cfg.EXP_ID.replace('_', '-')}"
    # Cache predictions so repeated evaluations can skip inference.
    cache_path = osp.join(self._output_dir, f"{method_name}_{self.dataset_name}_preds.pkl")
    if osp.exists(cache_path) and self.use_cache:
        self._logger.info("load cached predictions")
        self._predictions = mmcv.load(cache_path)
    else:
        if hasattr(self, "_predictions"):
            mmcv.dump(self._predictions, cache_path)
        else:
            raise RuntimeError("Please run inference first")
    precisions = OrderedDict()
    errors = OrderedDict()
    self.get_gts()
    error_names = ["ad", "re", "te", "proj"]
    # Metric naming: <error>_<threshold>, e.g. ad_2 = ADD within 2% of the
    # model diameter; re_5 = rotation error < 5 deg; te_5 = translation
    # error < 0.05; proj_5 = 2D projection error < 5 px.
    metric_names = [
        "ad_2",
        "ad_5",
        "ad_10",
        "rete_2",
        "rete_5",
        "rete_10",
        "re_2",
        "re_5",
        "re_10",
        "te_2",
        "te_5",
        "te_10",
        "proj_2",
        "proj_5",
        "proj_10",
    ]
    for obj_name in self.gts:
        if obj_name not in self._predictions:
            continue
        cur_label = self.obj_names.index(obj_name)
        if obj_name not in precisions:
            precisions[obj_name] = OrderedDict()
            for metric_name in metric_names:
                precisions[obj_name][metric_name] = []
        if obj_name not in errors:
            errors[obj_name] = OrderedDict()
            for err_name in error_names:
                errors[obj_name][err_name] = []
        #################
        obj_gts = self.gts[obj_name]
        obj_preds = self._predictions[obj_name]
        for file_name, gt_anno in obj_gts.items():
            # compute precision as in DPOD paper
            if file_name not in obj_preds:  # no pred found
                # NOTE: just ignore undetected
                continue
            # compute each metric
            R_pred = obj_preds[file_name]["R"]
            t_pred = obj_preds[file_name]["t"]
            R_gt = gt_anno["R"]
            t_gt = gt_anno["t"]
            t_error = te(t_pred, t_gt)
            if obj_name in cfg.DATASETS.SYM_OBJS:
                # Symmetric object: rotation/projection errors use the
                # closest symmetry-equivalent gt rotation, and the distance
                # metric is adi instead of add.
                R_gt_sym = get_closest_rot(
                    R_pred, R_gt, self._metadata.sym_infos[cur_label])
                r_error = re(R_pred, R_gt_sym)
                proj_2d_error = arp_2d(
                    R_pred, t_pred, R_gt_sym, t_gt,
                    pts=self.models_3d[cur_label]["pts"],
                    K=gt_anno["K"])
                ad_error = adi(R_pred, t_pred, R_gt, t_gt,
                               pts=self.models_3d[self.obj_names.index(
                                   obj_name)]["pts"])
            else:
                r_error = re(R_pred, R_gt)
                proj_2d_error = arp_2d(
                    R_pred, t_pred, R_gt, t_gt,
                    pts=self.models_3d[cur_label]["pts"],
                    K=gt_anno["K"])
                ad_error = add(R_pred, t_pred, R_gt, t_gt,
                               pts=self.models_3d[self.obj_names.index(
                                   obj_name)]["pts"])
            #########
            errors[obj_name]["ad"].append(ad_error)
            errors[obj_name]["re"].append(r_error)
            errors[obj_name]["te"].append(t_error)
            errors[obj_name]["proj"].append(proj_2d_error)
            ############
            # ad_* thresholds are fractions of the model diameter.
            precisions[obj_name]["ad_2"].append(
                float(ad_error < 0.02 * self.diameters[cur_label]))
            precisions[obj_name]["ad_5"].append(
                float(ad_error < 0.05 * self.diameters[cur_label]))
            precisions[obj_name]["ad_10"].append(
                float(ad_error < 0.1 * self.diameters[cur_label]))
            # deg, cm
            precisions[obj_name]["rete_2"].append(
                float(r_error < 2 and t_error < 0.02))
            precisions[obj_name]["rete_5"].append(
                float(r_error < 5 and t_error < 0.05))
            precisions[obj_name]["rete_10"].append(
                float(r_error < 10 and t_error < 0.1))
            precisions[obj_name]["re_2"].append(float(r_error < 2))
            precisions[obj_name]["re_5"].append(float(r_error < 5))
            precisions[obj_name]["re_10"].append(float(r_error < 10))
            precisions[obj_name]["te_2"].append(float(t_error < 0.02))
            precisions[obj_name]["te_5"].append(float(t_error < 0.05))
            precisions[obj_name]["te_10"].append(float(t_error < 0.1))
            # px
            precisions[obj_name]["proj_2"].append(float(proj_2d_error < 2))
            precisions[obj_name]["proj_5"].append(float(proj_2d_error < 5))
            precisions[obj_name]["proj_10"].append(
                float(proj_2d_error < 10))
    # summarize
    # Build a table: one row per metric, one column per object, plus the
    # per-metric average over objects.
    obj_names = sorted(list(precisions.keys()))
    header = ["objects"] + obj_names + [f"Avg({len(obj_names)})"]
    big_tab = [header]
    for metric_name in metric_names:
        line = [metric_name]
        this_line_res = []
        for obj_name in obj_names:
            res = precisions[obj_name][metric_name]
            if len(res) > 0:
                line.append(f"{100 * np.mean(res):.2f}")
                this_line_res.append(np.mean(res))
            else:
                # NOTE(review): appends a float here while other cells are
                # formatted strings — tabulate tolerates this mix.
                line.append(0.0)
                this_line_res.append(0.0)
        # mean
        if len(obj_names) > 0:
            line.append(f"{100 * np.mean(this_line_res):.2f}")
        big_tab.append(line)
    # Raw mean re/te errors as extra table rows.
    for error_name in ["re", "te"]:
        line = [error_name]
        this_line_res = []
        for obj_name in obj_names:
            res = errors[obj_name][error_name]
            if len(res) > 0:
                line.append(f"{np.mean(res):.2f}")
                this_line_res.append(np.mean(res))
            else:
                line.append(float("nan"))
                this_line_res.append(float("nan"))
        # mean
        if len(obj_names) > 0:
            line.append(f"{np.mean(this_line_res):.2f}")
        big_tab.append(line)
    ### log big table
    self._logger.info("precisions")
    res_log_tab_str = tabulate(
        big_tab,
        tablefmt="plain",
        # floatfmt=floatfmt
    )
    self._logger.info("\n{}".format(res_log_tab_str))
    # Persist raw errors and precisions for offline analysis.
    errors_cache_path = osp.join(
        self._output_dir, f"{method_name}_{self.dataset_name}_errors.pkl")
    recalls_cache_path = osp.join(
        self._output_dir, f"{method_name}_{self.dataset_name}_precisions.pkl")
    self._logger.info(f"{errors_cache_path}")
    self._logger.info(f"{recalls_cache_path}")
    mmcv.dump(errors, errors_cache_path)
    mmcv.dump(precisions, recalls_cache_path)
    dump_tab_name = osp.join(
        self._output_dir, f"{method_name}_{self.dataset_name}_tab_precisions.txt")
    with open(dump_tab_name, "w") as f:
        f.write("{}\n".format(res_log_tab_str))
    if self._distributed:
        self._logger.warning(
            "\n The current evaluation on multi-gpu is not correct, run with single-gpu instead."
        )
    return {}
# NOTE(review): fragment of a larger `if p["error_type"] == ...` dispatch —
# the opening `if` and the enclosing function are outside this chunk.
# Each branch fills `e` with one or more error values for the current pose.
elif p["error_type"] == "projS":  # sym-aware arp2d
    proj_2d_err = pose_error.arp_2d_sym(
        R_e, t_e, R_g, t_g, pts=models[obj_id]["pts"], K=K, syms=models_sym[obj_id]
    )
    e = [proj_2d_err]
elif p["error_type"] == "cus":
    # NOTE(review): only computed when the projected spheres overlap;
    # otherwise the error is set to the maximal value 1.0 — presumably
    # cus is normalized to [0, 1]; verify against pose_error.cus.
    if sphere_projections_overlap:
        e = [
            pose_error.cus(R_e, t_e, R_g, t_g, K, ren, obj_id, renderer_type=p["renderer_type"])
        ]
    else:
        e = [1.0]
elif p["error_type"] == "rete":
    # Combined rotation + translation error.
    r_err = pose_error.re(R_e, R_g)
    t_err = pose_error.te(t_e, t_g) / 10  # mm to cm
    e = [r_err, t_err]
elif p["error_type"] == "reteS":
    # Symmetry-aware variant of "rete".
    r_err = pose_error.re_sym(R_e, R_g, syms=models_sym[obj_id])
    t_err = pose_error.te_sym(t_e, t_g, R_gt=R_g, syms=models_sym[obj_id]) / 10  # mm to cm
    e = [r_err, t_err]
elif p["error_type"] == "re":
    r_err = pose_error.re(R_e, R_g)
    e = [r_err]
elif p["error_type"] == "reS":
    # Symmetry-aware rotation error only.
    r_err = pose_error.re_sym(R_e, R_g, syms=models_sym[obj_id])
    e = [r_err]