Example #1
    def testReadRetrievalPredictionsWorks(self):
        # Define inputs.
        file_path = os.path.join(tf.test.get_temp_dir(),
                                 'retrieval_predictions.csv')
        with tf.gfile.GFile(file_path, 'w') as f:
            f.write('id,images\n')
            f.write('0123456789abcdef,fedcba9876543250 \n')
            f.write('0423456789abcdef,fedcba9876543260\n')
            f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200 '
                    'fedcba9876543220\n')
            f.write('\n')
            f.write('0523456789abcdef,\n')
        public_ids = ['0223456789abcdef']
        private_ids = ['0323456789abcdef', '0423456789abcdef']
        ignored_ids = ['0123456789abcdef', '0523456789abcdef']

        # Run tested function.
        public_predictions, private_predictions = dataset_file_io.ReadPredictions(
            file_path, public_ids, private_ids, ignored_ids,
            dataset_file_io.RETRIEVAL_TASK_ID)

        # Define expected results.
        expected_public_predictions = {
            '0223456789abcdef':
            ['fedcba9876543210', 'fedcba9876543200', 'fedcba9876543220']
        }
        expected_private_predictions = {
            '0423456789abcdef': ['fedcba9876543260']
        }

        # Compare actual and expected results.
        self.assertEqual(public_predictions, expected_public_predictions)
        self.assertEqual(private_predictions, expected_private_predictions)
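The test above fixes the retrieval CSV contract: an id,images header, one query per row, the retrieved ids separated by spaces, and blank lines, empty prediction cells, and ignored ids all silently dropped. As a rough illustration only (the real parser is dataset_file_io.ReadPredictions), the behavior for the retrieval task could be sketched as follows; read_retrieval_predictions_sketch is a hypothetical stand-in, not the library function:

import csv

def read_retrieval_predictions_sketch(file_path, public_ids, private_ids,
                                      ignored_ids):
    """Parses a retrieval predictions CSV into public/private dicts."""
    public, private = {}, {}
    with open(file_path, 'r') as f:
        for row in csv.reader(f):
            if not row or row[0] == 'id':
                continue  # Skip blank lines and the header row.
            image_id, retrieved = row[0], row[1].split()
            if not retrieved or image_id in ignored_ids:
                continue  # Empty cells and ignored ids produce no entry.
            if image_id in public_ids:
                public[image_id] = retrieved
            elif image_id in private_ids:
                private[image_id] = retrieved
    return public, private

Run against the file written in the test, this sketch should reproduce the expected_public_predictions and expected_private_predictions dictionaries above.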
Example #2
def main(argv):
  if len(argv) > 1:
    raise RuntimeError('Too many command-line arguments.')

  # Read solution.
  print('Reading solution...')
  public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution(
      cmd_args.solution_path, dataset_file_io.RETRIEVAL_TASK_ID)
  print('done!')

  # Read predictions.
  print('Reading predictions...')
  public_predictions, private_predictions = dataset_file_io.ReadPredictions(
      cmd_args.predictions_path, set(public_solution.keys()),
      set(private_solution.keys()), set(ignored_ids),
      dataset_file_io.RETRIEVAL_TASK_ID)
  print('done!')

  # Mean average precision.
  print('**********************************************')
  print('(Public)  Mean Average Precision: %f' %
        metrics.MeanAveragePrecision(public_predictions, public_solution))
  print('(Private) Mean Average Precision: %f' %
        metrics.MeanAveragePrecision(private_predictions, private_solution))

  # Mean precision@k.
  print('**********************************************')
  public_precisions = 100.0 * metrics.MeanPrecisions(public_predictions,
                                                     public_solution)
  private_precisions = 100.0 * metrics.MeanPrecisions(private_predictions,
                                                      private_solution)
  print('(Public)  Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, '
        'P@50: %.2f, P@100: %.2f' %
        (public_precisions[0], public_precisions[4], public_precisions[9],
         public_precisions[49], public_precisions[99]))
  print('(Private) Mean precisions: P@1: %.2f, P@5: %.2f, P@10: %.2f, '
        'P@50: %.2f, P@100: %.2f' %
        (private_precisions[0], private_precisions[4], private_precisions[9],
         private_precisions[49], private_precisions[99]))

  # Mean/median position of first correct.
  print('**********************************************')
  public_mean_position, public_median_position = metrics.MeanMedianPosition(
      public_predictions, public_solution)
  private_mean_position, private_median_position = metrics.MeanMedianPosition(
      private_predictions, private_solution)
  print('(Public)  Mean position: %.2f, median position: %.2f' %
        (public_mean_position, public_median_position))
  print('(Private) Mean position: %.2f, median position: %.2f' %
        (private_mean_position, private_median_position))
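Note the indexing convention the print statements rely on: MeanPrecisions evidently returns one entry per rank, so index k-1 holds P@k (index 4 is P@5, index 99 is P@100), and multiplying the array by 100.0 converts it to percentages. Assuming the standard precision@k definition (the repository's metrics.MeanPrecisions is authoritative, including its handling of short prediction lists), a minimal sketch:

import numpy as np

def mean_precisions_sketch(predictions, solution, max_k=100):
    """Mean precision@k for k = 1..max_k; index k-1 holds P@k."""
    precisions = np.zeros(max_k)
    for query_id, relevant in solution.items():
        retrieved = predictions.get(query_id, [])
        num_hits = 0
        for i in range(max_k):
            if i < len(retrieved) and retrieved[i] in relevant:
                num_hits += 1
            precisions[i] += num_hits / (i + 1)  # P@(i+1) for this query.
    return precisions / len(solution)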
Example #3
def main(argv):
    if len(argv) > 1:
        raise RuntimeError('Too many command-line arguments.')

    # Read solution.
    print('Reading solution...')
    public_solution, private_solution, ignored_ids = dataset_file_io.ReadSolution(
        cmd_args.solution_path, dataset_file_io.RECOGNITION_TASK_ID)
    print('done!')

    # Read predictions.
    print('Reading predictions...')
    public_predictions, private_predictions = dataset_file_io.ReadPredictions(
        cmd_args.predictions_path, set(public_solution.keys()),
        set(private_solution.keys()), set(ignored_ids),
        dataset_file_io.RECOGNITION_TASK_ID)
    print('done!')

    # Global Average Precision.
    print('**********************************************')
    print('(Public)  Global Average Precision: %f' %
          metrics.GlobalAveragePrecision(public_predictions, public_solution))
    print(
        '(Private) Global Average Precision: %f' %
        metrics.GlobalAveragePrecision(private_predictions, private_solution))

    # Global Average Precision ignoring non-landmark queries.
    print('**********************************************')
    print(
        '(Public)  Global Average Precision ignoring non-landmark queries: %f'
        % metrics.GlobalAveragePrecision(public_predictions,
                                         public_solution,
                                         ignore_non_gt_test_images=True))
    print(
        '(Private) Global Average Precision ignoring non-landmark queries: %f'
        % metrics.GlobalAveragePrecision(private_predictions,
                                         private_solution,
                                         ignore_non_gt_test_images=True))

    # Top-1 accuracy.
    print('**********************************************')
    print('(Public)  Top-1 accuracy: %.2f' %
          (100.0 * metrics.Top1Accuracy(public_predictions, public_solution)))
    print(
        '(Private) Top-1 accuracy: %.2f' %
        (100.0 * metrics.Top1Accuracy(private_predictions, private_solution)))
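The two driver scripts differ only in the task id and the metrics: the recognition variant reports Global Average Precision (micro-AP), in which every prediction from every query is ranked by its confidence score and precision is accumulated at each correct hit, normalized by the number of queries that have ground truth. A sketch under the standard challenge definition (metrics.GlobalAveragePrecision in the repository is the authority; the ignore_non_gt_test_images option is not modeled here):

def global_average_precision_sketch(predictions, solution):
    """Micro-AP: rank all predictions by score, average precision at hits.

    solution maps each query id to its ground-truth labels (empty for
    non-landmark queries); predictions hold {'class': ..., 'score': ...}.
    """
    ranked = sorted(predictions.items(),
                    key=lambda item: item[1]['score'], reverse=True)
    num_gt = sum(1 for labels in solution.values() if labels)
    num_correct, total = 0, 0.0
    for rank, (query_id, pred) in enumerate(ranked, start=1):
        if pred['class'] in solution.get(query_id, ()):
            num_correct += 1
            total += num_correct / rank
    return total / num_gt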
Example #4
    def testReadRecognitionPredictionsWorks(self):
        # Define inputs.
        file_path = os.path.join(tf.test.get_temp_dir(),
                                 'recognition_predictions.csv')
        with tf.gfile.GFile(file_path, 'w') as f:
            f.write('id,landmarks\n')
            f.write('0123456789abcdef,12 0.1 \n')
            f.write('0423456789abcdef,0 19.0\n')
            f.write('0223456789abcdef,\n')
            f.write('\n')
            f.write('0523456789abcdef,14 0.01\n')
        public_ids = ['0123456789abcdef', '0223456789abcdef']
        private_ids = ['0423456789abcdef']
        ignored_ids = ['0323456789abcdef', '0523456789abcdef']

        # Run tested function.
        public_predictions, private_predictions = dataset_file_io.ReadPredictions(
            file_path, public_ids, private_ids, ignored_ids,
            dataset_file_io.RECOGNITION_TASK_ID)

        # Define expected results.
        expected_public_predictions = {
            '0123456789abcdef': {
                'class': 12,
                'score': 0.1
            }
        }
        expected_private_predictions = {
            '0423456789abcdef': {
                'class': 0,
                'score': 19.0
            }
        }

        # Compare actual and expected results.
        self.assertEqual(public_predictions, expected_public_predictions)
        self.assertEqual(private_predictions, expected_private_predictions)
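For the recognition task the CSV header is id,landmarks and each non-empty cell carries exactly one 'class score' pair, which ReadPredictions turns into a {'class': int, 'score': float} dict; empty cells, blank lines, and ignored ids are dropped, which is why only two of the five rows written above survive. A hypothetical per-cell helper illustrating the conversion:

def parse_recognition_cell_sketch(cell):
    """Turns a landmarks cell such as '12 0.1' into {'class': 12, 'score': 0.1}."""
    parts = cell.split()
    if len(parts) != 2:
        return None  # Empty or malformed cells yield no prediction.
    return {'class': int(parts[0]), 'score': float(parts[1])}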