Example #1
        '--vsd_normalized_by_diameter={}'.format(
          error['vsd_normalized_by_diameter'])
      ]

    misc.log('Running: ' + ' '.join(calc_errors_cmd))
    if subprocess.call(calc_errors_cmd) != 0:
      raise RuntimeError('Calculation of pose errors failed.')

    # Paths (rel. to p['eval_path']) to folders with calculated pose errors.
    # For VSD, there is one path for each setting of tau. For the other pose
    # error functions, there is only one path (see the sketch after this
    # example for the resulting layout).
    error_dir_paths = {}
    if error['type'] == 'vsd':
      for vsd_tau in error['vsd_taus']:
        error_sign = misc.get_error_signature(
          error['type'], error['n_top'], vsd_delta=error['vsd_deltas'][dataset],
          vsd_tau=vsd_tau)
        error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
    else:
      error_sign = misc.get_error_signature(error['type'], error['n_top'])
      error_dir_paths[error_sign] = os.path.join(result_name, error_sign)

    # Recall scores for all settings of the threshold of correctness (and also
    # of the misalignment tolerance tau in the case of VSD).
    recalls = []

    # Calculate performance scores.
    for error_sign, error_dir_path in error_dir_paths.items():
      for correct_th in error['correct_th']:

        calc_scores_cmd = [
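The loop in Example #1 builds a mapping from an error signature to a folder (relative to p['eval_path']) that holds the errors calculated for that setting, with one entry per tau for VSD. Below is a minimal sketch of the resulting layout, assuming a result named 'hodan-iros15_lm-test', n_top=1, delta=15 and two tau settings; the exact signature strings are produced by bop_toolkit's misc.get_error_signature, so the values here are only illustrative.

# Illustrative only: the exact signature format is defined by
# bop_toolkit's misc.get_error_signature.
error_dir_paths = {
  'error=vsd_ntop=1_delta=15.000_tau=0.050':
    'hodan-iros15_lm-test/error=vsd_ntop=1_delta=15.000_tau=0.050',
  'error=vsd_ntop=1_delta=15.000_tau=0.500':
    'hodan-iros15_lm-test/error=vsd_ntop=1_delta=15.000_tau=0.500',
}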
Example #2
                eval_path=p['eval_path'],
                result_name=result_name,
                error_sign=_error_sign,
                scene_id=scene_id)
            misc.ensure_dir(os.path.dirname(errors_path))
            misc.log('Saving errors to: {}'.format(errors_path))
            inout.save_json(errors_path, _scene_errs)

        # Save the calculated errors.
        if p['error_type'] == 'vsd':

            # For VSD, save errors for each tau value to a different file
            # (illustrated after this example).
            for vsd_tau_id, vsd_tau in enumerate(p['vsd_taus']):
                error_sign = misc.get_error_signature(
                    p['error_type'],
                    p['n_top'],
                    vsd_delta=p['vsd_deltas'][dataset],
                    vsd_tau=vsd_tau)

                # Keep only errors for the current tau.
                scene_errs_curr = copy.deepcopy(scene_errs)
                for err in scene_errs_curr:
                    for gt_id in err['errors'].keys():
                        err['errors'][gt_id] = [
                            err['errors'][gt_id][vsd_tau_id]
                        ]

                save_errors(error_sign, scene_errs_curr)
        else:
            error_sign = misc.get_error_signature(p['error_type'], p['n_top'])
            save_errors(error_sign, scene_errs)
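The per-tau splitting in Example #2 can be traced on a toy scene_errs structure. The sketch below is self-contained and runnable; the field names follow the BOP errors format used above, while the ids, tau settings and error values are made up.

import copy

# One pose estimate with its VSD errors w.r.t. one GT pose; the list under
# each gt_id holds one error value per tau in vsd_taus (values are made up).
vsd_taus = [0.05, 0.5]
scene_errs = [{
    'im_id': 0, 'obj_id': 1, 'est_id': 0, 'score': 1.0,
    'errors': {0: [0.12, 0.03]},
}]

for vsd_tau_id, vsd_tau in enumerate(vsd_taus):
    scene_errs_curr = copy.deepcopy(scene_errs)
    for err in scene_errs_curr:
        for gt_id in err['errors']:
            # Keep only the error computed for the current tau.
            err['errors'][gt_id] = [err['errors'][gt_id][vsd_tau_id]]
    print(vsd_tau, scene_errs_curr[0]['errors'])
# Prints: 0.05 {0: [0.12]}  then  0.5 {0: [0.03]}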
Example #3
    def _derive_bop_results(self, out_dir, result_name, grasp_only, logger):
        """Derives BOP results.

        Args:
          out_dir: Path to the output directory.
          result_name: BOP result name. Should be the name of a folder under
            out_dir that contains output from BOP evaluation.
          grasp_only: Whether to derive results on grasped objects only.
          logger: Logger.

        Returns:
          A dictionary holding the results.
        """
        if grasp_only:
            set_str = 'grasp only'
        else:
            set_str = 'all'

        logger.info('Deriving results for *{}*'.format(set_str))

        average_recalls = {}
        average_recalls_obj = defaultdict(dict)

        for error in self._p['errors']:

            # Paths (relative to out_dir) to folders with the calculated pose
            # errors. For VSD there is one path per tau setting; for the other
            # pose error functions there is only one path.
            error_dir_paths = {}
            if error['type'] == 'vsd':
                for vsd_tau in error['vsd_taus']:
                    error_sign = misc.get_error_signature(
                        error['type'],
                        error['n_top'],
                        vsd_delta=error['vsd_delta'],
                        vsd_tau=vsd_tau)
                    error_dir_paths[error_sign] = os.path.join(
                        result_name, error_sign)
            else:
                error_sign = misc.get_error_signature(error['type'],
                                                      error['n_top'])
                error_dir_paths[error_sign] = os.path.join(
                    result_name, error_sign)

            # Recall scores for all settings of the correctness threshold (and
            # of the misalignment tolerance tau in the case of VSD).
            recalls = []
            recalls_obj = defaultdict(list)

            for error_sign, error_dir_path in error_dir_paths.items():
                for correct_th in error['correct_th']:

                    # Load the per-estimate matches produced by the BOP
                    # score-calculation step for this threshold setting.
                    score_sign = misc.get_score_signature(
                        correct_th, self._p['visib_gt_min'])
                    matches_filename = "matches_{}.json".format(score_sign)
                    matches_path = os.path.join(out_dir, error_dir_path,
                                                matches_filename)

                    matches = inout.load_json(matches_path)

                    # Keep only matches that involve the object grasped in
                    # the corresponding image.
                    if grasp_only:
                        matches = [
                            m for m in matches if m['obj_id'] ==
                            self._grasp_id[m['scene_id']][m['im_id']]
                        ]

                    scores = score.calc_localization_scores(self._scene_ids,
                                                            self._obj_ids,
                                                            matches,
                                                            error['n_top'],
                                                            do_print=False)

                    recalls.append(scores['recall'])
                    for i, r in scores['obj_recalls'].items():
                        recalls_obj[i].append(r)

            # Average the recall over all threshold (and, for VSD, tau)
            # settings of this pose error function.
            average_recalls[error['type']] = np.mean(recalls)
            for i, r in recalls_obj.items():
                average_recalls_obj[i][error['type']] = np.mean(r)

        # The overall score is the mean of the average recalls for VSD, MSSD
        # and MSPD, reported in percent.
        results = {i: r * 100 for i, r in average_recalls.items()}
        results['mean'] = np.mean(
            [results['vsd'], results['mssd'], results['mspd']])

        keys, values = tuple(zip(*results.items()))
        table = tabulate(
            [values],
            headers=keys,
            tablefmt='pipe',
            floatfmt='.3f',
            stralign='center',
            numalign='center',
        )
        logger.info('Evaluation results for *{}*: \n'.format(set_str) + table)

        results_per_object = {}
        for i, v in average_recalls_obj.items():
            res = {k: r * 100 for k, r in v.items()}
            res['mean'] = np.mean([res['vsd'], res['mssd'], res['mspd']])
            results_per_object[self._dataset.ycb_classes[i]] = res

        # Arrange the per-object rows into n_cols // 5 groups of five columns
        # per table row (see the sketch after this example).
        n_cols = 5
        results_tuple = [(k, v['vsd'], v['mssd'], v['mspd'], v['mean'])
                         for k, v in results_per_object.items()]
        results_flatten = list(itertools.chain(*results_tuple))
        results_2d = itertools.zip_longest(
            *[results_flatten[i::n_cols] for i in range(n_cols)])
        table = tabulate(
            results_2d,
            tablefmt='pipe',
            floatfmt='.3f',
            headers=['object', 'vsd', 'mssd', 'mspd', 'mean'] * (n_cols // 5),
            numalign='right',
        )
        logger.info('Per-object scores for *{}*: \n'.format(set_str) + table)

        results['per_obj'] = results_per_object

        return results
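The itertools block near the end of Example #3 exists so that the per-object table can wrap several (object, vsd, mssd, mspd, mean) groups onto one row; with n_cols = 5 it simply reproduces one object per row. Below is a minimal, self-contained sketch of the same trick with two groups per row, using made-up object names and scores that are not from any real run.

import itertools

from tabulate import tabulate

# Made-up per-object scores, only to illustrate the table layout.
results_per_object = {
    'obj_A': {'vsd': 60.0, 'mssd': 65.0, 'mspd': 70.0, 'mean': 65.0},
    'obj_B': {'vsd': 55.0, 'mssd': 58.0, 'mspd': 62.0, 'mean': 58.3},
    'obj_C': {'vsd': 72.0, 'mssd': 75.0, 'mspd': 80.0, 'mean': 75.7},
}

n_cols = 10  # two (object, vsd, mssd, mspd, mean) groups per table row
results_tuple = [(k, v['vsd'], v['mssd'], v['mspd'], v['mean'])
                 for k, v in results_per_object.items()]
results_flatten = list(itertools.chain(*results_tuple))
# Slice the flat list into n_cols columns and transpose back into rows;
# zip_longest pads the last row when the object count is odd.
results_2d = itertools.zip_longest(
    *[results_flatten[i::n_cols] for i in range(n_cols)])
print(tabulate(
    results_2d,
    headers=['object', 'vsd', 'mssd', 'mspd', 'mean'] * (n_cols // 5),
    tablefmt='pipe',
    floatfmt='.3f',
    numalign='right'))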