Example #1
  time_start = time.time()

  # Parse info about the errors from the folder name.
  error_sign = os.path.basename(error_dir_path)
  err_type = str(error_sign.split('_')[0].split('=')[1])
  n_top = int(error_sign.split('_')[1].split('=')[1])
  result_info = os.path.basename(os.path.dirname(error_dir_path)).split('_')
  method = result_info[0]
  dataset_info = result_info[1].split('-')
  dataset = dataset_info[0]
  split = dataset_info[1]
  split_type = dataset_info[2] if len(dataset_info) > 2 else None

  # Evaluation signature.
  score_sign = misc.get_score_signature(
    p['correct_th'][err_type], p['visib_gt_min'])

  misc.log('Calculating score - error: {}, method: {}, dataset: {}.'.format(
    err_type, method, dataset))

  # Load dataset parameters.
  dp_split = dataset_params.get_split_params(
    p['datasets_path'], dataset, split, split_type)

  model_type = 'eval'
  dp_model = dataset_params.get_model_params(
    p['datasets_path'], dataset, model_type)

  # Load info about the object models.
  models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True)
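
The fragment above recovers the evaluation settings purely from the directory layout (<method>_<dataset>-<split>[-<split_type>]/error=<type>_ntop=<n>). Below is a minimal, self-contained sketch of that parsing; the concrete path 'mymethod_lmo-test/error=vsd_ntop=-1' is a hypothetical example chosen to match the naming scheme implied by the code, not a value from the original script.

import os

# Hypothetical directory produced by the error-calculation step:
#   <method>_<dataset>-<split>[-<split_type>]/error=<type>_ntop=<n>
error_dir_path = os.path.join('mymethod_lmo-test', 'error=vsd_ntop=-1')

error_sign = os.path.basename(error_dir_path)        # 'error=vsd_ntop=-1'
err_type = error_sign.split('_')[0].split('=')[1]    # 'vsd'
n_top = int(error_sign.split('_')[1].split('=')[1])  # -1

result_info = os.path.basename(os.path.dirname(error_dir_path)).split('_')
method = result_info[0]                               # 'mymethod'
dataset_info = result_info[1].split('-')
dataset, split = dataset_info[0], dataset_info[1]     # 'lmo', 'test'
split_type = dataset_info[2] if len(dataset_info) > 2 else None

print(err_type, n_top, method, dataset, split, split_type)
# vsd -1 mymethod lmo test None
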
Example #2
          os.path.join('scripts', 'eval_calc_scores.py'),
          '--error_dir_paths={}'.format(error_dir_path),
          '--eval_path={}'.format(p['eval_path']),
          '--targets_filename={}'.format(p['targets_filename']),
          '--visib_gt_min={}'.format(p['visib_gt_min'])
        ]

        # Serialize the correctness threshold(s) into a comma-separated CLI flag.
        calc_scores_cmd += ['--correct_th_{}={}'.format(
          error['type'], ','.join(map(str, correct_th)))]

        misc.log('Running: ' + ' '.join(calc_scores_cmd))
        if subprocess.call(calc_scores_cmd) != 0:
          raise RuntimeError('Calculation of scores failed.')

        # Path to file with calculated scores.
        score_sign = misc.get_score_signature(correct_th, p['visib_gt_min'])

        scores_filename = 'scores_{}.json'.format(score_sign)
        scores_path = os.path.join(
          p['eval_path'], result_name, error_sign, scores_filename)
        
        # Load the scores.
        misc.log('Loading calculated scores from: {}'.format(scores_path))
        scores = inout.load_json(scores_path)
        recalls.append(scores['recall'])

    average_recalls[error['type']] = np.mean(recalls)

    misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
    misc.log('Average recall: {}'.format(average_recalls[error['type']]))
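
The snippet drives a separate scoring script through subprocess and then reads the JSON it writes. A minimal sketch of that pattern follows; 'calc_scores.py', 'scores.json' and the flag values are placeholder names for illustration, not the exact BOP toolkit interface.

import json
import subprocess
import sys

# Run a child script, fail loudly on a non-zero exit code, then read the JSON
# file the child is expected to produce. Paths and flags are placeholders.
cmd = [
    sys.executable, 'calc_scores.py',
    '--error_dir_paths=path/to/error_dir',
    '--correct_th_vsd=' + ','.join(map(str, [0.05, 0.1, 0.15])),
]

if subprocess.call(cmd) != 0:
    raise RuntimeError('Calculation of scores failed.')

with open('scores.json', 'r') as f:
    scores = json.load(f)
print(scores['recall'])
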
Example #3
    def _derive_bop_results(self, out_dir, result_name, grasp_only, logger):
        """Derives BOP results.

    Args:
      out_dir: Path to the output directory.
      result_name: BOP result name. Should be the name of a folder under out_dir
        that contains output from BOP evaluation.
      grasp_only: Whether to derive results on grasped objects only.
      logger: Logger.

    Returns:
      A dictionary holding the results.
    """
        if grasp_only:
            set_str = 'grasp only'
        else:
            set_str = 'all'

        logger.info('Deriving results for *{}*'.format(set_str))

        average_recalls = {}
        average_recalls_obj = defaultdict(lambda: {})

        for error in self._p['errors']:

            # Collect error directories: one per vsd_tau for VSD, a single one otherwise.
            error_dir_paths = {}
            if error['type'] == 'vsd':
                for vsd_tau in error['vsd_taus']:
                    error_sign = misc.get_error_signature(
                        error['type'],
                        error['n_top'],
                        vsd_delta=error['vsd_delta'],
                        vsd_tau=vsd_tau)
                    error_dir_paths[error_sign] = os.path.join(
                        result_name, error_sign)
            else:
                error_sign = misc.get_error_signature(error['type'],
                                                      error['n_top'])
                error_dir_paths[error_sign] = os.path.join(
                    result_name, error_sign)

            recalls = []
            recalls_obj = defaultdict(lambda: [])

            for error_sign, error_dir_path in error_dir_paths.items():
                for correct_th in error['correct_th']:

                    score_sign = misc.get_score_signature(
                        correct_th, self._p['visib_gt_min'])
                    matches_filename = "matches_{}.json".format(score_sign)
                    matches_path = os.path.join(out_dir, error_dir_path,
                                                matches_filename)

                    matches = inout.load_json(matches_path)

                    # Keep only matches for the grasped object in each scene/image.
                    if grasp_only:
                        matches = [
                            m for m in matches if m['obj_id'] ==
                            self._grasp_id[m['scene_id']][m['im_id']]
                        ]

                    scores = score.calc_localization_scores(self._scene_ids,
                                                            self._obj_ids,
                                                            matches,
                                                            error['n_top'],
                                                            do_print=False)

                    recalls.append(scores['recall'])
                    for i, r in scores['obj_recalls'].items():
                        recalls_obj[i].append(r)

            average_recalls[error['type']] = np.mean(recalls)
            for i, r in recalls_obj.items():
                average_recalls_obj[i][error['type']] = np.mean(r)

        # Convert recalls to percent; the overall score is the mean over VSD, MSSD and MSPD.
        results = {i: r * 100 for i, r in average_recalls.items()}
        results['mean'] = np.mean(
            [results['vsd'], results['mssd'], results['mspd']])

        keys, values = tuple(zip(*results.items()))
        table = tabulate(
            [values],
            headers=keys,
            tablefmt='pipe',
            floatfmt='.3f',
            stralign='center',
            numalign='center',
        )
        logger.info('Evaluation results for *{}*: \n'.format(set_str) + table)

        results_per_object = {}
        for i, v in average_recalls_obj.items():
            res = {k: r * 100 for k, r in v.items()}
            res['mean'] = np.mean([res['vsd'], res['mssd'], res['mspd']])
            results_per_object[self._dataset.ycb_classes[i]] = res

        n_cols = 5
        results_tuple = [(k, v['vsd'], v['mssd'], v['mspd'], v['mean'])
                         for k, v in results_per_object.items()]
        results_flatten = list(itertools.chain(*results_tuple))
        results_2d = itertools.zip_longest(
            *[results_flatten[i::n_cols] for i in range(n_cols)])
        table = tabulate(
            results_2d,
            tablefmt='pipe',
            floatfmt='.3f',
            headers=['object', 'vsd', 'mssd', 'mspd', 'mean'] * (n_cols // 5),
            numalign='right',
        )
        logger.info('Per-object scores for *{}*: \n'.format(set_str) + table)

        results['per_obj'] = results_per_object

        return results
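
The per-object table at the end is built by flattening the (object, vsd, mssd, mspd, mean) tuples and regrouping them with itertools.zip_longest into rows of n_cols values; with n_cols = 5, as in the method above, that is one object per printed row, and multiples of 5 pack several objects side by side. A standalone sketch of that reshaping with made-up recall values (the object names and numbers below are illustrative only):

import itertools
from tabulate import tabulate

# Made-up per-object scores, just to illustrate the reshaping.
results_per_object = {
    'obj_a': {'vsd': 61.2, 'mssd': 58.0, 'mspd': 70.1, 'mean': 63.1},
    'obj_b': {'vsd': 44.5, 'mssd': 40.3, 'mspd': 55.7, 'mean': 46.8},
    'obj_c': {'vsd': 81.0, 'mssd': 77.9, 'mspd': 88.2, 'mean': 82.4},
    'obj_d': {'vsd': 30.4, 'mssd': 28.1, 'mspd': 41.6, 'mean': 33.4},
}

n_cols = 10  # two (object, vsd, mssd, mspd, mean) groups per printed row
results_tuple = [(k, v['vsd'], v['mssd'], v['mspd'], v['mean'])
                 for k, v in results_per_object.items()]
results_flatten = list(itertools.chain(*results_tuple))
results_2d = itertools.zip_longest(
    *[results_flatten[i::n_cols] for i in range(n_cols)])

print(tabulate(
    results_2d,
    tablefmt='pipe',
    floatfmt='.3f',
    headers=['object', 'vsd', 'mssd', 'mspd', 'mean'] * (n_cols // 5),
    numalign='right',
))
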