Example #1
def get_all_scenes_for_obj(eval_args):
    workspace_path = os.environ.get("AE_WORKSPACE_PATH")
    dataset_path = u.get_dataset_path(workspace_path)

    dataset_name = eval_args.get("DATA", "DATASET")
    cam_type = eval_args.get("DATA", "CAM_TYPE")
    try:
        obj_id = eval_args.getint("DATA", "OBJ_ID")
    except Exception:
        # OBJ_ID not set; fall back to the first entry of OBJECTS
        obj_id = eval(eval_args.get("DATA", "OBJECTS"))[0]

    cfg_string = str(dataset_name)
    # md5 expects bytes under Python 3
    current_config_hash = hashlib.md5(cfg_string.encode("utf-8")).hexdigest()
    current_file_name = os.path.join(dataset_path,
                                     current_config_hash + ".npy")

    if os.path.exists(current_file_name):
        # allow_pickle is needed to load a pickled dict with recent numpy
        obj_scene_dict = np.load(current_file_name, allow_pickle=True).item()
    else:
        p = dataset_params.get_dataset_params(dataset_name,
                                              model_type="",
                                              train_type="",
                                              test_type=cam_type,
                                              cam_type=cam_type)

        obj_scene_dict = {}
        scene_gts = []
        for scene_id in range(1, p["scene_count"] + 1):
            print(scene_id)
            scene_gts.append(
                inout.load_yaml(p["scene_gt_mpath"].format(scene_id)))

        for obj in range(1, p["obj_count"] + 1):
            eval_scenes = set()
            for scene_i, scene_gt in enumerate(scene_gts):
                for view_gt in scene_gt[0]:
                    if view_gt["obj_id"] == obj:
                        eval_scenes.add(scene_i + 1)
            obj_scene_dict[obj] = list(eval_scenes)
        np.save(current_file_name, obj_scene_dict)
    print(obj_scene_dict)

    eval_scenes = obj_scene_dict[obj_id]

    return eval_scenes
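
A minimal usage sketch (not part of the original source). Example #8 below imports this helper from eval_utils, so assuming that module layout, an AE_WORKSPACE_PATH environment variable, and a .cfg file whose [DATA] section defines DATASET, CAM_TYPE and OBJ_ID (or OBJECTS), the call would look roughly like this; all paths are placeholders.

import os
import configparser

import eval_utils  # module name taken from Example #8; adjust to your layout

os.environ.setdefault("AE_WORKSPACE_PATH", "/path/to/ae_workspace")  # placeholder
eval_args = configparser.ConfigParser()
eval_args.read("/path/to/eval.cfg")  # placeholder; must contain a [DATA] section

# returns the list of scene ids in which the configured object appears
eval_scenes = eval_utils.get_all_scenes_for_obj(eval_args)
print(eval_scenes)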
Example #2
def plot_re_rect_occlusion(eval_dir,
                           scene_ids,
                           all_test_visibs,
                           bins=10,
                           top_n=1):
    # top_n_eval = eval_args.getint('EVALUATION', 'TOP_N_EVAL')
    # top_n = eval_args.getint('METRIC', 'TOP_N')
    # if top_n_eval < 1:
    #     return

    all_angle_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir, "error=re_ntop=%s" % top_n,
                                       "errors_{:02d}.yml".format(scene_id))
        if not os.path.exists(error_file_path):
            print("WARNING: " + error_file_path + " not found")
            continue

        angle_errs_dict = inout.load_yaml(error_file_path)
        all_angle_errs += [
            list(angle_e["errors"].values())[0] for angle_e in angle_errs_dict
        ]

    if len(all_angle_errs) == 0:
        return
    all_angle_errs = np.array(all_angle_errs)
    # print( all_vsd_errs)

    fig = plt.figure()  # noqa
    plt.grid()
    plt.ylabel("rot err [deg]")
    plt.xlabel("visibility [percent]")
    # plt.axis((-0.1, 1.1, -0.1, 1.1))
    # plt.xlim((0.0, 1.0))
    # plt.ylim((0.0, 1.0))

    total_views = int(len(all_angle_errs) / top_n)
    angle_errs_rect = np.empty((total_views, ))

    for view in range(total_views):
        top_n_errors = all_angle_errs[view * top_n:(view + 1) * top_n]
        angle_errs_rect[view] = np.min(
            [top_n_errors[0], 180 - top_n_errors[0]])

    bounds = np.linspace(0, 1, bins + 1)
    bin_angle_errs = []
    bin_count = []

    for idx in range(bins):
        bin_idcs = np.where((all_test_visibs > bounds[idx])
                            & (all_test_visibs < bounds[idx + 1]))
        # median_angle_err[idx] = np.median(angle_errs_rect[bin_idcs])
        bin_angle_errs.append(angle_errs_rect[bin_idcs])
        bin_count.append(len(bin_idcs[0]))

    middle_bin_vis = bounds[:-1] + (bounds[1] - bounds[0]) / 2.0
    # plt.bar(middle_bin_vis,median_angle_err,0.5/bins)
    plt.boxplot(bin_angle_errs,
                positions=middle_bin_vis,
                widths=0.5 / bins,
                sym="+")

    # count_str = 'bin count ' + bins * '%s '
    # count_str = count_str % tuple(bin_count)
    plt.title("Visibility vs Median Rectified Rotation Error" + str(bin_count))
    tikz_save(
        os.path.join(eval_dir, "latex", "R_err_occlusion.tex"),
        figurewidth="0.45\\textheight",
        figureheight="0.45\\textheight",
        show_info=False,
    )
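
A hedged usage sketch: besides the evaluation directory and the scene ids, the function only needs one visibility fraction per test view (all_test_visibs); everything below is placeholder data.

import numpy as np

# one visibility fraction in [0, 1] per test view (synthetic placeholder)
all_test_visibs = np.random.uniform(0.0, 1.0, size=1000)

plot_re_rect_occlusion("/path/to/eval_dir",      # placeholder eval directory
                       scene_ids=range(1, 21),   # e.g. the 20 T-LESS test scenes
                       all_test_visibs=all_test_visibs,
                       bins=10,
                       top_n=1)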
Example #3
def get_gt_scene_crops(scene_id, eval_args, train_args):

    dataset_name = eval_args.get("DATA", "DATASET")
    cam_type = eval_args.get("DATA", "CAM_TYPE")
    icp = eval_args.getboolean("EVALUATION", "ICP")

    delta = eval_args.get("METRIC", "VSD_DELTA")

    workspace_path = os.environ.get("AE_WORKSPACE_PATH")
    dataset_path = u.get_dataset_path(workspace_path)

    H = train_args.getint("Dataset", "H")

    cfg_string = str([scene_id] + eval_args.items("DATA") +
                     eval_args.items("BBOXES") + [H])
    # md5 expects bytes under Python 3
    current_config_hash = hashlib.md5(cfg_string.encode("utf-8")).hexdigest()

    current_file_name = os.path.join(dataset_path,
                                     current_config_hash + ".npz")

    if os.path.exists(current_file_name):
        # allow_pickle is needed to load pickled object arrays with recent numpy
        data = np.load(current_file_name, allow_pickle=True)
        test_img_crops = data["test_img_crops"].item()
        test_img_depth_crops = data["test_img_depth_crops"].item()
        bb_scores = data["bb_scores"].item()
        bb_vis = data["visib_gt"].item()
        bbs = data["bbs"].item()
    if not osp.exists(current_file_name) or len(test_img_crops) == 0 or len(
            test_img_depth_crops) == 0:
        test_imgs = load_scenes(scene_id, eval_args)
        test_imgs_depth = load_scenes(scene_id, eval_args,
                                      depth=True) if icp else None

        data_params = dataset_params.get_dataset_params(dataset_name,
                                                        model_type="",
                                                        train_type="",
                                                        test_type=cam_type,
                                                        cam_type=cam_type)

        # only available for primesense, sixdtoolkit can generate
        visib_gt = inout.load_yaml(data_params["scene_gt_stats_mpath"].format(
            scene_id, delta))

        bb_gt = inout.load_gt(data_params["scene_gt_mpath"].format(scene_id))

        test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis = generate_scene_crops(
            test_imgs,
            test_imgs_depth,
            bb_gt,
            eval_args,
            train_args,
            visib_gt=visib_gt)

        np.savez(
            current_file_name,
            test_img_crops=test_img_crops,
            test_img_depth_crops=test_img_depth_crops,
            bbs=bbs,
            bb_scores=bb_scores,
            visib_gt=bb_vis,
        )

        current_cfg_file_name = os.path.join(dataset_path,
                                             current_config_hash + ".cfg")
        with open(current_cfg_file_name, "w") as f:
            f.write(cfg_string)
        print("created new ground truth crops!")
    else:
        print("loaded previously generated ground truth crops!")
        print(len(test_img_crops), len(test_img_depth_crops))

    return (test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis)
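
A rough usage sketch, assuming the same configparser-style eval_args as in Example #1 plus a training config whose [Dataset] section defines H (the crop height); the config paths and scene id are placeholders.

import configparser

eval_args = configparser.ConfigParser()
eval_args.read("/path/to/eval.cfg")    # needs [DATA], [BBOXES], [EVALUATION], [METRIC]
train_args = configparser.ConfigParser()
train_args.read("/path/to/train.cfg")  # needs [Dataset] with H

crops, depth_crops, bbs, bb_scores, bb_vis = get_gt_scene_crops(
    scene_id=1, eval_args=eval_args, train_args=train_args)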
Example #4
def plot_vsd_occlusion(eval_dir,
                       scene_ids,
                       all_test_visibs,
                       top_n=1,
                       delta=15,
                       tau=20,
                       cost="step",
                       bins=10):

    # top_n_eval = eval_args.getint('EVALUATION', 'TOP_N_EVAL')
    # top_n = eval_args.getint('METRIC', 'TOP_N')
    # delta = eval_args.getint('METRIC', 'VSD_DELTA')
    # tau = eval_args.getint('METRIC', 'VSD_TAU')
    # cost = eval_args.get('METRIC', 'VSD_COST')

    # if top_n_eval < 1:
    #     return

    all_vsd_errs = []
    for scene_id in scene_ids:
        error_file_path = osp.join(
            eval_dir,
            "error=vsd_ntop=%s_delta=%s_tau=%s_cost=%s" %
            (top_n, delta, tau, cost),
            "errors_{:02d}.yml".format(scene_id),
        )

        if not osp.exists(error_file_path):
            print("WARNING: " + error_file_path + " not found")
            continue

        vsd_dict = inout.load_yaml(error_file_path)
        all_vsd_errs += [
            list(vsd_e["errors"].values())[0] for vsd_e in vsd_dict
        ]

    if len(all_vsd_errs) == 0:
        return
    all_vsd_errs = np.array(all_vsd_errs)

    # print( all_vsd_errs)

    fig = plt.figure()  # noqa
    # ax = plt.axes()
    # ax.set_xlim((0.0,1.0))
    # ax.set_ylim((0.0,1.0))
    ax = plt.gca()
    ax.set_xlim((0.0, 1.0))
    plt.grid()
    plt.ylabel("vsd err")
    plt.xlabel("visibility [percent]")
    # plt.xlim((0.0, 1.0))
    # plt.ylim((0.0, 1.0))

    total_views = int(len(all_vsd_errs) / top_n)
    vsd_errs = np.empty((total_views, ))

    for view in range(total_views):
        top_n_errors = all_vsd_errs[view * top_n:(view + 1) * top_n]
        vsd_errs[view] = top_n_errors[0]

    bounds = np.linspace(0, 1, bins + 1)
    bin_vsd_errs = []
    bin_count = []

    for idx in range(bins):
        bin_idcs = np.where((all_test_visibs > bounds[idx])
                            & (all_test_visibs < bounds[idx + 1]))
        bin_vsd_errs.append(vsd_errs[bin_idcs])
        bin_count.append(len(bin_idcs[0]))

    middle_bin_vis = bounds[:-1] + (bounds[1] - bounds[0]) / 2.0
    # plt.bar(middle_bin_vis,mean_vsd_err,0.5/bins)

    plt.boxplot(bin_vsd_errs,
                positions=middle_bin_vis,
                widths=0.5 / bins,
                sym="+")

    # count_str = 'bin count ' + bins * '%s '
    # count_str = count_str % tuple(bin_count)
    plt.title("Visibility vs Mean VSD Error" + str(bin_count))
    tikz_save(
        os.path.join(eval_dir, "latex", "vsd_occlusion.tex"),
        figurewidth="0.45\\textheight",
        figureheight="0.45\\textheight",
        show_info=False,
    )
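
Examples #2 and #4 share one binning idiom: split the [0, 1] visibility range into equal bins, collect the per-view errors whose visibility falls inside each bin, and draw one box per bin centered on the bin midpoint. A self-contained sketch of just that idiom, with synthetic data:

import numpy as np
import matplotlib.pyplot as plt

bins = 10
visibs = np.random.uniform(0.0, 1.0, size=500)  # synthetic visibility per view
errs = np.random.uniform(0.0, 0.5, size=500)    # synthetic error per view

bounds = np.linspace(0, 1, bins + 1)
bin_errs = [errs[(visibs > bounds[i]) & (visibs < bounds[i + 1])]
            for i in range(bins)]
centers = bounds[:-1] + (bounds[1] - bounds[0]) / 2.0

plt.boxplot(bin_errs, positions=centers, widths=0.5 / bins, sym="+")
plt.show()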
Example #5
def plot_vsd_err_hist(eval_dir,
                      scene_ids,
                      obj_id,
                      dataset_name="linemod",
                      top_n=1,
                      delta=15,
                      tau=20,
                      cost="step",
                      cam_type="primesense"):
    # top_n_eval = eval_args.getint('EVALUATION', 'TOP_N_EVAL')
    # top_n = eval_args.getint('METRIC', 'TOP_N')
    # delta = eval_args.getint('METRIC', 'VSD_DELTA')
    # tau = eval_args.getint('METRIC', 'VSD_TAU')
    # cost = eval_args.get('METRIC', 'VSD_COST')
    # cam_type = eval_args.get('DATA', 'cam_type')
    # dataset_name = eval_args.get('DATA', 'dataset')
    # obj_id = eval_args.getint('DATA', 'obj_id')
    # if top_n_eval < 1:
    #     return

    data_params = dataset_params.get_dataset_params(dataset_name,
                                                    model_type="",
                                                    train_type="",
                                                    test_type=cam_type,
                                                    cam_type=cam_type)

    vsd_errs = []
    for scene_id in scene_ids:
        if dataset_name in ["linemod", "hinterstoisser"]:
            # NOTE: linemod scene_id == obj_id
            if obj_id != scene_id:
                continue

        error_file_path = osp.join(
            eval_dir,
            "error=vsd_ntop=%s_delta=%s_tau=%s_cost=%s" %
            (top_n, delta, tau, cost),
            "errors_{:02d}.yml".format(scene_id),
        )

        if not osp.exists(error_file_path):
            print("WARNING: " + error_file_path + " not found")
            continue
        gts = inout.load_gt(data_params["scene_gt_mpath"].format(scene_id))
        visib_gts = inout.load_yaml(data_params["scene_gt_stats_mpath"].format(
            scene_id, 15))  # delta=15
        vsd_dict = inout.load_yaml(error_file_path)
        for view in range(len(vsd_dict)):
            vsds = vsd_dict[view * top_n:(view + 1) * top_n]
            for gt, visib_gt in zip(gts[view], visib_gts[view]):
                if gt["obj_id"] == obj_id:
                    if visib_gt["visib_fract"] > 0.1:
                        for vsd_e in vsds:
                            vsd_errs += [list(vsd_e["errors"].values())[0]]

    if len(vsd_errs) == 0:
        return
    vsd_errs = np.array(vsd_errs)
    logger.info("vsd errs: {}".format(len(vsd_errs)))

    fig = plt.figure()  # noqa
    ax = plt.gca()
    ax.set_xlim((0.0, 1.0))
    plt.grid()
    plt.xlabel("vsd err")
    plt.ylabel("recall")
    plt.title("obj: {}, VSD Error vs Recall".format(obj_id))
    legend = []

    for n in np.unique(np.array([top_n, 1])):

        total_views = int(len(vsd_errs) / top_n)
        min_vsd_errs = np.empty((total_views, ))

        for view in range(total_views):
            top_n_errors = vsd_errs[view * top_n:(view + 1) * top_n]
            if n == 1:
                top_n_errors = top_n_errors[np.newaxis, 0]
            min_vsd_errs[view] = np.min(top_n_errors)

        min_vsd_errs_sorted = np.sort(min_vsd_errs)
        recall = np.float32(np.arange(total_views) + 1.0) / total_views

        # fill curve
        min_vsd_errs_sorted = np.hstack((min_vsd_errs_sorted, np.array([1.0])))
        recall = np.hstack((recall, np.array([1.0])))

        AUC_vsd = np.trapz(recall, min_vsd_errs_sorted)
        plt.plot(min_vsd_errs_sorted, recall)

        legend += ["top {0} vsd err, AUC = {1:.4f}".format(n, AUC_vsd)]
        logger.info("obj:{} top {} vsd err, AUC = {:.4f}".format(
            obj_id, n, AUC_vsd))
    plt.legend(legend)
    out_file = osp.join(eval_dir, "latex",
                        "vsd_err_hist_obj_{:02d}.tex".format(obj_id))
    mkdir_p(osp.dirname(out_file))
    logger.info(osp.basename(out_file))
    tikz_save(out_file,
              figurewidth="0.45\\textheight",
              figureheight="0.45\\textheight",
              show_info=False)
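
The recall curve and AUC above follow a simple pattern: sort the per-view errors, treat the i-th smallest error as the threshold at which (i + 1) / N of the views are recalled, close the curve at the maximum possible error (1.0 for VSD), and integrate with the trapezoidal rule. A stripped-down sketch of that step alone, on synthetic data:

import numpy as np

errs = np.random.uniform(0.0, 1.0, size=500)  # synthetic per-view VSD errors
errs_sorted = np.sort(errs)
recall = (np.arange(len(errs)) + 1.0) / len(errs)

# close the curve at the right edge of the error range
errs_sorted = np.hstack((errs_sorted, [1.0]))
recall = np.hstack((recall, [1.0]))

auc = np.trapz(recall, errs_sorted)  # area under the error-vs-recall curve
print("AUC = {:.4f}".format(auc))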
Example #6
def plot_R_err_hist(eval_args, eval_dir, scene_ids):

    top_n_eval = eval_args.getint("EVALUATION", "TOP_N_EVAL")
    top_n = eval_args.getint("METRIC", "TOP_N")
    cam_type = eval_args.get("DATA", "cam_type")
    dataset_name = eval_args.get("DATA", "dataset")
    obj_id = eval_args.getint("DATA", "obj_id")

    if top_n_eval < 1:
        return

    data_params = dataset_params.get_dataset_params(dataset_name,
                                                    model_type="",
                                                    train_type="",
                                                    test_type=cam_type,
                                                    cam_type=cam_type)

    angle_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir, "error=re_ntop=%s" % top_n,
                                       "errors_{:02d}.yml".format(scene_id))
        if not os.path.exists(error_file_path):
            print("WARNING: " + error_file_path + " not found")
            continue
        # angle_errs_dict = inout.load_yaml(error_file_path)
        # angle_errs += [list(angle_e['errors'].values())[0] for angle_e in angle_errs_dict]

        gts = inout.load_gt(data_params["scene_gt_mpath"].format(scene_id))
        visib_gts = inout.load_yaml(data_params["scene_gt_stats_mpath"].format(
            scene_id, 15))
        re_dict = inout.load_yaml(error_file_path)

        for view in range(len(gts)):
            res = re_dict[view * top_n:(view + 1) * top_n]
            for gt, visib_gt in zip(gts[view], visib_gts[view]):
                if gt["obj_id"] == obj_id:
                    if visib_gt["visib_fract"] > 0.1:
                        for re_e in res:
                            angle_errs += [list(re_e["errors"].values())[0]]

    if len(angle_errs) == 0:
        return

    angle_errs = np.array(angle_errs)

    fig = plt.figure()  # noqa
    plt.grid()
    plt.xlabel("angle err [deg]")
    plt.ylabel("recall")
    plt.title("Angle Error vs Recall")
    legend = []

    for n in np.unique(np.array([top_n, 1])):

        total_views = int(len(angle_errs) / top_n)
        min_angle_errs = np.empty((total_views, ))
        min_angle_errs_rect = np.empty((total_views, ))

        for view in range(total_views):
            top_n_errors = angle_errs[view * top_n:(view + 1) * top_n]
            if n == 1:
                top_n_errors = top_n_errors[np.newaxis, 0]
            min_angle_errs[view] = np.min(top_n_errors)
            min_angle_errs_rect[view] = np.min(
                np.hstack((top_n_errors, 180 - top_n_errors)))

        min_angle_errs_sorted = np.sort(min_angle_errs)
        min_angle_errs_rect_sorted = np.sort(min_angle_errs_rect)
        recall = (np.arange(total_views) + 1.0) / total_views

        # fill curve
        min_angle_errs_sorted = np.hstack(
            (min_angle_errs_sorted, np.array([180.0])))
        min_angle_errs_rect_sorted = np.hstack(
            (min_angle_errs_rect_sorted, np.array([90.0])))
        recall = np.hstack((recall, np.array([1.0])))

        AUC_angle = np.trapz(recall, min_angle_errs_sorted / 180.0)
        AUC_angle_rect = np.trapz(recall, min_angle_errs_rect_sorted / 90.0)

        plt.plot(min_angle_errs_sorted, recall)
        plt.plot(min_angle_errs_rect_sorted, recall)

        legend += [
            "top {0} angle err, AUC = {1:.4f}".format(n, AUC_angle),
            "top {0} rectified angle err, AUC = {1:.4f}".format(
                n, AUC_angle_rect),
        ]
    plt.legend(legend)
    tikz_save(
        osp.join(eval_dir, "latex", "R_err_hist.tex"),
        figurewidth="0.45\\textheight",
        figureheight="0.45\\textheight",
        show_info=False,
    )
Example #7
def match_and_eval_performance_scores(
    eval_dir,
    error_types=["vsd", "re", "te"],
    error_thresh={
        "vsd": 0.3,
        "cou": 0.5,
        "te": 5.0,
        "re": 5.0
    },  # cm  # deg
    error_thresh_fact={
        "add": 0.1,
        "adi": 0.1
    },
    dataset="linemod",
    cam_type="primesense",
    n_top=1,
    vsd_delta=15,
    vsd_tau=20,
    vsd_cost="step",
    method="",
    image_subset="bb8",
):
    """
    error_thresh_fact: Factor k; threshold of correctness = k * d, where d is the object diameter
    """
    # Paths to pose errors (calculated using eval_calc_errors.py)
    # ---------------------------------------------------------------------------
    # error_bpath = '/path/to/eval/'
    # error_paths = [
    #     pjoin(error_bpath, 'hodan-iros15_hinterstoisser'),
    #     # pjoin(error_bpath, 'hodan-iros15_tless_primesense'),
    # ]
    assert image_subset in ["sixd_v1", "bb8", "None"]
    test_type = cam_type

    idx_th = 0
    idx_thf = 0

    for error_type in error_types:
        # Error signature
        error_sign = "error:" + error_type + "_ntop:" + str(n_top)
        if error_type == "vsd":
            error_sign += "_delta:{}_tau:{}_cost:{}".format(
                vsd_delta, vsd_tau, vsd_cost)

        error_path = osp.join(eval_dir, error_sign)
        # error_dir = 'error=vsd_ntop=1_delta=15_tau=20_cost=step'
        # Other paths
        # ---------------------------------------------------------------------------
        # Mask of path to the input file with calculated errors
        errors_mpath = pjoin("{error_path}", "errors_{scene_id:02d}.yml")

        # Mask of path to the output file with established matches and calculated scores
        matches_mpath = pjoin("{error_path}", "matches_{eval_sign}.yml")
        scores_mpath = pjoin("{error_path}", "scores_{eval_sign}.yml")

        # Parameters
        # ---------------------------------------------------------------------------
        # use_image_subset = True  # Whether to use the specified subset of images
        require_all_errors = False  # Whether to break if some errors are missing
        visib_gt_min = 0.1  # Minimum visible surface fraction of valid GT pose
        visib_delta = 15  # [mm]

        # # Threshold of correctness
        # error_thresh = {
        #     'vsd': 0.3,
        #     'cou': 0.5,
        #     'te': 5.0, # [cm]
        #     're': 5.0 # [deg]
        # }

        # # Factor k; threshold of correctness = k * d, where d is the object diameter
        # error_thresh_fact = {
        #     'add': 0.1,
        #     'adi': 0.1
        # }

        # Evaluation
        # ---------------------------------------------------------------------------

        # Evaluation signature
        if error_type in ["add", "adi"]:
            if type(error_thresh_fact[error_type]) is list:
                cur_thres_f = error_thresh_fact[error_type][idx_thf]
                idx_thf += 1
            else:
                cur_thres_f = error_thresh_fact[error_type]
            eval_sign = "thf:" + str(cur_thres_f)
        else:
            if type(error_thresh[error_type]) is list:
                cur_thres = error_thresh[error_type][idx_th]
                idx_th += 1
            else:
                cur_thres = error_thresh[error_type]
            eval_sign = "th:" + str(cur_thres)
        eval_sign += "_min-visib:" + str(visib_gt_min)

        logger.info("--- Processing: {}, {}, {}".format(
            method, dataset, error_type))

        # Load dataset parameters
        dp = get_dataset_params(dataset, test_type=test_type)
        obj_ids = range(1, dp["obj_count"] + 1)
        scene_ids = range(1, dp["scene_count"] + 1)

        # Subset of images to be considered
        if image_subset == "sixd_v1":
            logger.info("use image subset: {}".format(dp["test_set_fpath"]))
            im_ids_sets = inout.load_yaml(dp["test_set_fpath"])
            total_imgs = sum([len(v) for k, v in im_ids_sets.items()])
            logger.info(
                "total number of imgs in test set: {}".format(total_imgs))
        elif image_subset == "bb8":
            logger.info("use image subset: {}".format(
                dp["test_set_bb8_fpath"]))
            im_ids_sets = inout.load_yaml(dp["test_set_bb8_fpath"])
            total_imgs = sum([len(v) for k, v in im_ids_sets.items()])
            logger.info("total number of imgs in test set: {}".format(
                total_imgs))  # 13425
        else:
            im_ids_sets = None

        # Set threshold of correctness (might be different for each object)
        error_threshs = {}
        if error_type in ["add", "adi"]:
            # Relative to object diameter
            models_info = inout.load_yaml(dp["models_info_path"])
            for obj_id in obj_ids:
                obj_diameter = models_info[obj_id]["diameter"]
                error_threshs[obj_id] = cur_thres_f * obj_diameter
        else:
            # The same threshold for all objects
            for obj_id in obj_ids:
                error_threshs[obj_id] = cur_thres

        # Go through the test scenes and match estimated poses to GT poses
        # -----------------------------------------------------------------------
        matches = []  # Stores info about the matching estimate for each GT
        for scene_id in scene_ids:
            # Load GT poses
            gts = inout.load_gt(dp["scene_gt_mpath"].format(scene_id))

            # Load statistics (e.g. visibility fraction) of the GT poses
            gt_stats_path = dp["scene_gt_stats_mpath"].format(
                scene_id, visib_delta)
            gt_stats = inout.load_yaml(gt_stats_path)

            # Keep the GT poses and their stats only for the selected images
            if im_ids_sets is not None:
                if scene_id not in im_ids_sets.keys():
                    continue
                im_ids = im_ids_sets[scene_id]
                gts = {im_id: gts[im_id] for im_id in im_ids}
                gt_stats = {im_id: gt_stats[im_id] for im_id in im_ids}

            # Load pre-calculated errors of the pose estimates
            scene_errs_path = errors_mpath.format(error_path=error_path,
                                                  scene_id=scene_id)

            if osp.isfile(scene_errs_path):
                logger.info("loading error file: {}".format(scene_errs_path))
                errs = inout.load_errors(scene_errs_path)
                matches += match_poses(gts, gt_stats, errs, scene_id,
                                       visib_gt_min, error_threshs, n_top)
            elif require_all_errors:
                raise IOError(
                    "{} is missing, but errors for all scenes are required"
                    " (require_all_errors = True).".format(scene_errs_path))

        # Calculate the performance scores
        # -----------------------------------------------------------------------
        # Split the dataset of Hinterstoisser to the original LINEMOD dataset
        # and the Occlusion dataset by TUD (i.e. the extended GT for scene #2)
        if dataset in ["hinterstoisser", "linemod", "linemod_occ"]:
            if dataset in ["linemod", "hinterstoisser"]:
                logger.info("-- LINEMOD dataset")
                eval_sign_lm = "linemod_" + eval_sign
                matches_lm = [
                    m for m in matches if m["scene_id"] == m["obj_id"]
                ]
                scores_lm = calc_scores(scene_ids,
                                        obj_ids,
                                        matches_lm,
                                        n_top,
                                        dataset=dataset)

                # Save scores
                scores_lm_path = scores_mpath.format(error_path=error_path,
                                                     eval_sign=eval_sign_lm)
                inout.save_yaml(scores_lm_path, scores_lm)

                # Save matches
                matches_path = matches_mpath.format(error_path=error_path,
                                                    eval_sign=eval_sign_lm)
                inout.save_yaml(matches_path, matches_lm)
            elif dataset == "linemod_occ":
                logger.info("-- Occlusion dataset")
                eval_sign_occ = "occlusion_" + eval_sign
                matches_occ = [m for m in matches if m["scene_id"] == 2]
                scene_ids_occ = [2]
                # obj_ids_occ = [1, 2, 5, 6, 8, 9, 10, 11, 12]
                obj_ids_occ = [1, 5, 6, 8, 9, 10, 11, 12]
                scores_occ = calc_scores(scene_ids_occ,
                                         obj_ids_occ,
                                         matches_occ,
                                         n_top,
                                         dataset=dataset)
                # Save scores
                scores_occ_path = scores_mpath.format(error_path=error_path,
                                                      eval_sign=eval_sign_occ)
                inout.save_yaml(scores_occ_path, scores_occ)

                # Save matches
                matches_path = matches_mpath.format(error_path=error_path,
                                                    eval_sign=eval_sign_occ)
                inout.save_yaml(matches_path, matches_occ)
        else:
            scores = calc_scores(scene_ids, obj_ids, matches, n_top)

            # Save scores
            scores_path = scores_mpath.format(error_path=error_path,
                                              eval_sign=eval_sign)
            inout.save_yaml(scores_path, scores)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign)
            inout.save_yaml(matches_path, matches)

    logger.info("Done.")
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("experiment_group")
    parser.add_argument("--eval_name", default="*", required=False)
    args = parser.parse_args()

    experiment_group = args.experiment_group
    eval_name = args.eval_name
    print(eval_name)

    workspace_path = os.environ.get("AE_WORKSPACE_PATH")

    exp_group_path = os.path.join(workspace_path, "experiments",
                                  experiment_group)
    print(exp_group_path)
    error_score_files = glob.glob(
        os.path.join(exp_group_path, "*/eval", eval_name, "*/error*/scores*"))
    print(error_score_files)
    data_re = []
    data_auc_re = []
    data_auc_rerect = []
    data_te = []
    data_vsd = []
    data_cou = []
    data_add = []
    data_adi = []
    data_proj = []
    data_paper_vsd = {}
    data_paper_auc = {}
    latex_content = []

    for error_score_file in error_score_files:
        split_path = error_score_file.split("/")
        exp_name = split_path[-6]
        eval_name = split_path[-4]
        occl = "occlusion" if "occlusion" in error_score_file else ""
        test_data = split_path[-3]
        error_type = split_path[-2].split("_")[0].split("=")[1]
        print(error_type)
        topn = split_path[-2].split("=")[2].split("_")[0]
        error_thres = split_path[-1].split("=")[1].split("_")[0]

        eval_cfg_file_path = os.path.join(workspace_path, "experiments",
                                          experiment_group, exp_name, "eval",
                                          eval_name, test_data, "*.cfg")
        eval_cfg_file_pathes = glob.glob(eval_cfg_file_path)
        if len(eval_cfg_file_pathes) == 0:
            continue
        else:
            eval_cfg_file_path = eval_cfg_file_pathes[0]

        eval_args = configparser.ConfigParser()
        eval_args.read(eval_cfg_file_path)
        print(eval_cfg_file_path)
        estimate_bbs = eval_args.getboolean("BBOXES", "ESTIMATE_BBS")
        try:
            obj_id = eval_args.getint("DATA", "OBJ_ID")
        except Exception:
            obj_id = eval(eval_args.get("DATA", "OBJECTS"))[0]

        scenes = eval_utils.get_all_scenes_for_obj(eval_args)

        data = [item[1] for item in eval_args.items("DATA")]
        data[2] = (eval(eval_args.get("DATA", "SCENES"))
                   if len(eval(eval_args.get("DATA", "SCENES"))) > 0 else
                   eval_utils.get_all_scenes_for_obj(eval_args))

        # print( str(data))

        error_score_dict = inout.load_yaml(error_score_file)
        try:
            sixd_recall = error_score_dict["obj_recalls"][obj_id]
        except Exception:
            continue

        if error_type == "re":
            data_re.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
            err_file = os.path.join(
                os.path.dirname(os.path.dirname(error_score_file)),
                "latex/R_err_hist.tex")
            try:
                with open(err_file, "r") as f:
                    for line in f:
                        if re.match("(.*)legend entries(.*)", line):
                            auc_re = float(line.split("=")[2].split("}")[0])
                            auc_rerect = float(
                                line.split("=")[3].split("}")[0])

                data_auc_re.append({
                    "exp_name": exp_name,
                    "eval_name": eval_name,
                    "error_type": "auc_re",
                    "thres": "None",
                    "top": topn,
                    "sixd_recall": auc_re,
                    "EST_BBS": estimate_bbs,
                    "eval_data": str(data[:2] + [occl]),
                    "eval_scenes": str(data[2]),
                    "eval_obj": str(data[3]),
                })
                data_auc_rerect.append({
                    "exp_name": exp_name,
                    "eval_name": eval_name,
                    "error_type": "auc_rerect",
                    "thres": "None",
                    "top": topn,
                    "sixd_recall": auc_rerect,
                    "EST_BBS": estimate_bbs,
                    "eval_data": str(data[:2] + [occl]),
                    "eval_scenes": str(data[2]),
                    "eval_obj": str(data[3]),
                })

                # dict.has_key() was removed in Python 3
                if int(data[3]) not in data_paper_auc:
                    data_paper_auc[int(data[3])] = {}
                    data_paper_auc[int(data[3])]["eval_obj"] = int(data[3])
                data_paper_auc[int(
                    data[3])][eval_name + "_" + "auc_re" + "_" +
                              str(data[1])] = float(auc_re) * 100
                data_paper_auc[int(
                    data[3])][eval_name + "_" + "auc_rerect" + "_" +
                              str(data[1])] = (float(auc_rerect) * 100)
            except Exception:
                print(err_file, "not found")

        elif error_type == "te":
            data_te.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
        elif error_type == "vsd":
            data_vsd.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj":
                int(data[3]) if "[" not in data[3] else eval(data[3])[0],
            })
            # dict.has_key() was removed in Python 3
            if int(data[3]) not in data_paper_vsd:
                data_paper_vsd[int(data[3])] = {}
                data_paper_vsd[int(data[3])]["eval_obj"] = int(data[3])
            data_paper_vsd[int(
                data[3])][eval_name + "_" + error_type + "_" +
                          str(data[1])] = float(sixd_recall) * 100

        elif error_type == "cou":
            data_cou.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
        elif error_type == "add":
            data_add.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
        elif error_type == "proj":
            data_proj.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
        elif error_type == "adi":
            data_adi.append({
                "exp_name": exp_name,
                "eval_name": eval_name,
                "error_type": error_type,
                "thres": error_thres,
                "top": topn,
                "sixd_recall": sixd_recall,
                "EST_BBS": estimate_bbs,
                "eval_data": str(data[:2] + [occl]),
                "eval_scenes": str(data[2]),
                "eval_obj": str(data[3]),
            })
        else:
            print("error not known: ", error_type)

    if len(data_re) > 0:
        df_re = pd.DataFrame(data_re).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_re.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_auc_re) > 0:
        df_re = pd.DataFrame(data_auc_re).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_re.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_auc_rerect) > 0:
        df_re = pd.DataFrame(data_auc_rerect).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_re.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_te) > 0:
        df_te = pd.DataFrame(data_te).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_te.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_cou) > 0:
        df_cou = pd.DataFrame(data_cou).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_cou.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_add) > 0:
        df_add = pd.DataFrame(data_add).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_add.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_proj) > 0:
        df_proj = pd.DataFrame(data_proj).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_proj.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_adi) > 0:
        df_adi = pd.DataFrame(data_adi).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_adi.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_paper_vsd) > 0:
        df_paper = pd.DataFrame.from_dict(data_paper_vsd, orient="index")
        cols = ["eval_obj"] + [col for col in df_paper if col != "eval_obj"]
        df_paper = df_paper[cols]
        df_paper = df_paper.sort_index(axis=1)
        df_paper.loc["mean"] = df_paper.mean(axis=0)
        # df_paper.loc['mean'][0] = 0

        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_list = df_paper.to_latex(index=False,
                                       multirow=True,
                                       float_format="%.2f").splitlines()
        latex_list.insert(len(latex_list) - 3, "\\midrule")
        latex_new = "\n".join(latex_list)
        latex_content.append(latex_new)
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_paper_auc) > 0:
        df_paper = pd.DataFrame.from_dict(data_paper_auc, orient="index")
        cols = ["eval_obj"] + [col for col in df_paper if col != "eval_obj"]
        df_paper = df_paper[cols]
        df_paper = df_paper.sort_index(axis=1)
        df_paper.loc["mean"] = df_paper.mean(axis=0)
        # df_paper.loc['mean'][0] = 0

        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_list = df_paper.to_latex(index=False,
                                       multirow=True,
                                       float_format="%.2f").splitlines()
        latex_list.insert(len(latex_list) - 3, "\\midrule")
        latex_new = "\n".join(latex_list)
        latex_content.append(latex_new)
        latex_content.append("\\end{adjustbox}")
        latex_content.append("\n")
        latex_content.append("\n")
    if len(data_vsd) > 0:
        df_vsd = pd.DataFrame(data_vsd).sort_values(
            by=["eval_obj", "eval_name", "eval_data", "sixd_recall"])
        latex_content.append("\\begin{adjustbox}{max width=\\textwidth}")
        latex_content.append(df_vsd.to_latex(index=False, multirow=True))
        latex_content.append("\\end{adjustbox}")

    latex_content = "".join(latex_content)

    full_filename = os.path.join(exp_group_path, "latex", "report.tex")
    if not os.path.exists(os.path.join(exp_group_path, "latex")):
        os.makedirs(os.path.join(exp_group_path, "latex"))

    with open(full_filename, "w") as f:
        f.write(prolog % (time.ctime(), experiment_group.replace("_", "\\_")))
        f.write(latex_content)
        f.write(epilog)

    from subprocess import check_output, Popen

    check_output(["pdflatex", "report.tex"],
                 cwd=os.path.dirname(full_filename))
    Popen(["okular", "report.pdf"], cwd=os.path.dirname(full_filename))

    print("finished")