def save_errors(_error_sign, _scene_errs):
    """Write the computed pose errors of one scene to a JSON file.

    The output path is built from the module-level template
    ``p["out_errors_tpath"]`` and the globals ``result_name`` and
    ``scene_id``.

    :param _error_sign: Signature string identifying the error type/settings.
    :param _scene_errs: Errors of the scene to be serialized to JSON.
    """
    out_path = p["out_errors_tpath"].format(
        eval_path=p["eval_path"],
        result_name=result_name,
        error_sign=_error_sign,
        scene_id=scene_id,
    )
    # Make sure the target directory exists before writing.
    misc.ensure_dir(osp.dirname(out_path))
    misc.log("Saving errors to: {}".format(out_path))
    inout.save_json(out_path, _scene_errs)
def write_text_on_image(im, txt_list, loc=(3, 0), color=(1.0, 1.0, 1.0), size=20):
    """Writes text info on an image.

    :param im: ndarray on which the text info will be written.
    :param txt_list: List of dictionaries, each describing one info line:
      - 'name': Entry name.
      - 'val': Entry value.
      - 'fmt': String format for the value.
    :param loc: Location of the top left corner of the text box.
    :param color: Font color (RGB components in [0, 1]).
    :param size: Font size.
    :return: Image with written text info.
    """
    im_pil = Image.fromarray(im)

    # Load font; fall back to PIL's built-in bitmap font if the TTF is missing.
    try:
        font_path = os.path.join(os.path.dirname(__file__), "droid_sans_mono.ttf")
        font = ImageFont.truetype(font_path, size)
    except IOError:
        misc.log("Warning: Loading a fallback font.")
        font = ImageFont.load_default()

    draw = ImageDraw.Draw(im_pil)
    for info in txt_list:
        # Render "name:value" (or just the value if the name is empty).
        if info["name"] != "":
            txt_tpl = "{}:{" + info["fmt"] + "}"
        else:
            txt_tpl = "{}{" + info["fmt"] + "}"
        txt = txt_tpl.format(info["name"], info["val"])
        draw.text(loc, txt, fill=tuple([int(c * 255) for c in color]), font=font)

        # FIX: font.getsize() was deprecated in Pillow 9.2 and removed in
        # Pillow 10; use getbbox() when available and keep getsize() as a
        # fallback for old Pillow versions.
        if hasattr(font, "getbbox"):
            bbox = font.getbbox(txt)
            text_height = bbox[3] - bbox[1]
        else:
            _, text_height = font.getsize(txt)

        # Move the anchor down by one line for the next entry.
        loc = (loc[0], loc[1] + text_height)
    del draw

    return np.array(im_pil)
# Parse the per-error-type correctness thresholds from the corresponding
# CLI arguments (comma-separated lists of floats) into `p["correct_th"]`.
for err_type in p["correct_th"]:
    p["correct_th"][err_type] = list(map(float, args.__dict__["correct_th_" + err_type].split(",")))

# Copy the remaining CLI arguments into the parameter dictionary `p`
# (presumably defined with defaults earlier in this script — not visible here).
p["normalized_by_diameter"] = args.normalized_by_diameter.split(",")
p["normalized_by_im_width"] = args.normalized_by_im_width.split(",")
p["visib_gt_min"] = float(args.visib_gt_min)
p["error_dir_paths"] = args.error_dir_paths.split(",")
p["eval_path"] = str(args.eval_path)
p["datasets_path"] = str(args.datasets_path)
p["targets_filename"] = str(args.targets_filename)
p["error_tpath"] = str(args.error_tpath)
p["out_matches_tpath"] = str(args.out_matches_tpath)
p["out_scores_tpath"] = str(args.out_scores_tpath)

# Log the effective parameters for reproducibility.
misc.log("-----------")
misc.log("Parameters:")
for k, v in p.items():
    misc.log("- {}: {}".format(k, v))
misc.log("-----------")

# Rename the process so it is identifiable in `ps`/`top`.
setproctitle.setproctitle("eval_calc_scores_{}".format(p["error_tpath"]))

# Calculation of the performance scores.
# ------------------------------------------------------------------------------
for error_dir_path in p["error_dir_paths"]:
    misc.log("Processing: {}".format(error_dir_path))

    time_start = time.time()

    # Parse info about the errors from the folder name.
    # NOTE(review): the loop body continues beyond this chunk.
    error_sign = osp.basename(error_dir_path)
# Copy CLI arguments into the parameter dictionary `p`.
p["n_top"] = int(args.n_top)
p["error_type"] = str(args.error_type)
# "delta:tau" pairs, e.g. "15:0.05" -> {"15": 0.05}.
p["vsd_deltas"] = {str(e.split(":")[0]): float(e.split(":")[1]) for e in args.vsd_deltas.split(",")}
p["vsd_taus"] = list(map(float, args.vsd_taus.split(",")))
# NOTE(review): if this argument arrives as a string, bool("False") is True —
# verify the argparse definition uses a real boolean action.
p["vsd_normalized_by_diameter"] = bool(args.vsd_normalized_by_diameter)
p["max_sym_disc_step"] = float(args.max_sym_disc_step)
p["skip_missing"] = bool(args.skip_missing)
p["renderer_type"] = str(args.renderer_type)
p["result_filenames"] = args.result_filenames.split(",")
p["results_path"] = str(args.results_path)
p["eval_path"] = str(args.eval_path)
p["datasets_path"] = str(args.datasets_path)
p["targets_filename"] = str(args.targets_filename)
p["out_errors_tpath"] = str(args.out_errors_tpath)

# Log the effective parameters for reproducibility.
misc.log("-----------")
misc.log("Parameters:")
for k, v in p.items():
    misc.log("- {}: {}".format(k, v))
misc.log("-----------")

# Rename the process so it is identifiable in `ps`/`top`.
setproctitle.setproctitle("eval_calc_errors_{}".format(p["error_type"]))


def se3_mul(R1, T1, R2, T2):
    """Compose two rigid-body transforms given as rotation + translation.

    Returns (R, t) equivalent to applying (R2, T2) first and then (R1, T1):
    R = R1 R2, t = R1 T2 + T1.

    :param R1, R2: 3x3 rotation matrices (ndarray).
    :param T1, T2: Translation vectors, reshaped internally to 3x1.
    :return: Tuple (R, t) with R of shape (3, 3) and t of shape (3,).
    """
    t1 = T1.reshape((3, 1))
    t2 = T2.reshape((3, 1))
    rot = R1 @ R2
    trans = (R1 @ t2 + t1).reshape((3,))
    return rot, trans
# Copy CLI arguments into the parameter dictionary `p`.
p["results_path"] = str(args.results_path)
p["eval_path"] = str(args.eval_path)
p["targets_filename"] = str(args.targets_filename)
p["error_types"] = args.error_types.split(",")

# Reject unknown error types early.
# NOTE(review): `assert` is stripped under `python -O`; consider raising
# ValueError instead if this validation must always run.
for e_type in p["error_types"]:
    assert e_type in KNOWN_ERROR_TYPES, f"Unknown error type: {e_type}"
p["n_top"] = args.n_top

# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p["result_filenames"]:
    misc.log("===========")
    misc.log("EVALUATING: {}".format(result_filename))
    misc.log("===========")

    time_start = time.time()

    # Volume under recall surface (VSD) / area under recall curve (MSSD, MSPD; AUCadd, AUCadi, AUCad).
    average_recalls = {}

    # Name of the result and the dataset.
    # Assumes filenames like "<method>_<dataset>-<split>...": the dataset is
    # the token between the first "_" and the following "-" — TODO confirm
    # against the result-file naming convention.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
    dataset = str(result_name.split("_")[1].split("-")[0])

    # Redirect logging into a per-result directory.
    logger.set_logger_dir(osp.join(p["eval_path"], result_name), action="k")

    # Calculate the average estimation time per image.
    # NOTE(review): the loop body continues beyond this chunk.