Example #1
    def test(self):
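        # Give every ground-truth box a score of 1, turning the ground truth
        # into a set of perfect detections.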
        scores = np.ones(self.det_gt.shape[0])
        dets = self.det_gt.append_column('score', scores)

        dets_just_A = self.d.get_det_gt_for_class('A')
        scores = np.ones(dets_just_A.shape[0])
        dets_just_A = dets_just_A.append_column('score', scores)

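        # With uniform class values, each of the three classes carries weight 1/3.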
        self.d.set_values('uniform')
        assert (np.all(self.d.values == 1. / 3 * np.ones(len(self.classes))))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.33333333333333))

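        # Inverse-prior values presumably weight classes by the inverse of their
        # prior frequency; here they come out to [0.25, 0.25, 0.5].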
        self.d.set_values('inverse_prior')
        assert (np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.25))
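A quick sketch of the arithmetic behind those assertions, assuming compute_det_map averages per-class APs weighted by the dataset's class values (an assumption inferred from the asserted numbers, not from the Evaluation source):

    import numpy as np

    # Perfect detections exist only for class 'A', so per-class APs are (1, 0, 0).
    per_class_ap = np.array([1.0, 0.0, 0.0])
    uniform = np.ones(3) / 3
    inverse_prior = np.array([0.25, 0.25, 0.5])
    print(per_class_ap.dot(uniform))         # 0.3333... as asserted
    print(per_class_ap.dot(inverse_prior))   # 0.25 as asserted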
Example #2
    def __init__(self,
                 test_dataset,
                 train_dataset,
                 weights_dataset_name=None,
                 **kwargs):
        """
    Initialize the DatasetPolicy, getting it ready to run on the whole dataset
    or a single image.
    - test, train, weights datasets should be:
      - val,  train,    val:    for training the weights
      - test, trainval, val:    for final run
    - **kwargs update the default config
    """
        config = copy.copy(DatasetPolicy.default_config)
        config.update(kwargs)

        self.dataset = test_dataset
        self.train_dataset = train_dataset
        if not weights_dataset_name:
            weights_dataset_name = self.dataset.name
        self.weights_dataset_name = weights_dataset_name

        self.__dict__.update(config)
        print("DatasetPolicy running with:")
        pprint(self.__dict__)
        self.ev = Evaluation(self)
        self.tt = ut.TicToc()

        # Determine inference mode:
        # fixed_order, random, oracle policy modes get fixed_order inference mode
        if re.search('fastinf', self.policy_mode):
            self.inference_mode = 'fastinf'
        elif self.policy_mode == 'random':
            self.inference_mode = 'random'
        elif self.policy_mode in ['no_smooth', 'backoff']:
            self.inference_mode = self.policy_mode
        else:
            self.inference_mode = 'fixed_order'

        # Determine fastinf model name
        self.fastinf_model_name = 'this_is_empty'
        if self.inference_mode == 'fastinf':
            if self.detectors == ['csc_default']:
                self.fastinf_model_name = 'CSC'
            elif self.detectors == ['perfect']:
                self.fastinf_model_name = 'perfect'
            elif self.detectors == ['gist']:
                self.fastinf_model_name = 'GIST'
            elif self.detectors == ['gist', 'csc_default']:
                self.fastinf_model_name = 'GIST_CSC'
            else:
                raise RuntimeError("""
          We don't have Fastinf models for the detector combination you
          are running with: %s""" % self.detectors)

        # load the actions and the corresponding weights and we're ready to go
        self.test_actions = self.init_actions()
        self.actions = self.test_actions
        self.load_weights()
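A minimal instantiation sketch; the dataset names are borrowed from Example #4 and detector='perfect' from Examples #1 and #3, so treat the exact values as placeholders:

    # Hypothetical setup mirroring the val/train pairing in the docstring above.
    train_dataset = Dataset('full_pascal_train')
    test_dataset = Dataset('full_pascal_val')
    dp = DatasetPolicy(test_dataset, train_dataset,
                       weights_dataset_name='full_pascal_val',
                       detector='perfect')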
Example #3
    def setup(self):
        train_dataset = Dataset('test_pascal_train', force=True)
        dataset = Dataset('test_pascal_val', force=True)
        self.dp = DatasetPolicy(dataset, train_dataset, detector='perfect')
        self.evaluation = Evaluation(self.dp)
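This fixture follows the val/train pairing described in Example #2's docstring: test_pascal_val serves as the test dataset and test_pascal_train as the training dataset.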
Example #4
def main():
  parser = argparse.ArgumentParser(
    description="Run experiments with the timely detection system.")

  parser.add_argument('--test_dataset',
    choices=['val','test','trainval'],
    default='val',
    help="""Dataset to use for testing. Run on val until final runs.
    The training dataset is inferred (val->train; test->trainval; trainval->trainval).""")

  parser.add_argument('--first_n', type=int,
    help='only take the first N images in the test dataset')

  parser.add_argument('--first_n_train', type=int,
    help='only take the first N images in the train dataset')

  parser.add_argument('--config',
    help="""Config file name that specifies the experiments to run.
    Give name such that the file is configs/#{name}.json or configs/#{name}/
    In the latter case, all files within the directory will be loaded.""")

  parser.add_argument('--suffix',
    help="Overwrites the suffix in the config(s).")

  parser.add_argument('--bounds10', action='store_true', 
    default=False, help='set bounds to [0,10]')
  
  parser.add_argument('--bounds515', action='store_true', 
    default=False, help='set bounds to [5,15]')

  parser.add_argument('--force', action='store_true', 
    default=False, help='force overwrite')

  parser.add_argument('--wholeset_prs', action='store_true', 
    default=False, help='evaluate in the final p-r regime')

  parser.add_argument('--no_apvst', action='store_true', 
    default=False, help='do NOT evaluate in the ap vs. time regime')

  parser.add_argument('--det_configs', action='store_true', 
    default=False, help='output detector statistics to det_configs')

  parser.add_argument('--inverse_prior', action='store_true', 
    default=False, help='use inverse prior class values')

  args = parser.parse_args()
  print(args)

  # If config file is not given, just run one experiment using default config
  if not args.config:
    configs = [DatasetPolicy.default_config]
  else:
    configs = load_configs(args.config)

  # Load the dataset
  dataset = Dataset('full_pascal_'+args.test_dataset)
  if args.first_n:
    dataset.images = dataset.images[:args.first_n]

  # Infer train_dataset
  if args.test_dataset=='test':
    train_dataset = Dataset('full_pascal_trainval')
  elif args.test_dataset=='val':
    train_dataset = Dataset('full_pascal_train')
  elif args.test_dataset=='trainval':
    train_dataset = Dataset('full_pascal_trainval')
  else:
    pass  # unreachable: argparse choices restrict test_dataset to the cases above
  
  # Only need to set training dataset values; evaluation gets it from there
  if args.inverse_prior:
    train_dataset.set_values('inverse_prior')

  # TODO: hack
  if args.first_n_train:
    train_dataset.images = train_dataset.images[:args.first_n_train]

  # In both the above cases, we use the val dataset for weights
  weights_dataset_name = 'full_pascal_val'

  dets_tables = []
  dets_tables_whole = []
  clses_tables_whole = []
  all_bounds = []
      
  plot_infos = [] 
  for config_f in configs:
    if args.suffix:
      config_f['suffix'] = args.suffix
    if args.bounds10:
      config_f['bounds'] = [0,10]
    if args.bounds515:
      config_f['bounds'] = [5,15]
    assert(not (args.bounds10 and args.bounds515))
    if args.inverse_prior:
      config_f['suffix'] += '_inverse_prior'
      config_f['values'] = 'inverse_prior'

    dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f)
    ev = Evaluation(dp)
    all_bounds.append(dp.bounds)
    plot_infos.append(dict((k,config_f[k]) for k in ('label','line','color') if k in config_f))
    # output the det configs first
    if args.det_configs:
      dp.output_det_statistics()

    # evaluate in the AP vs. Time regime, unless told not to
    if not args.no_apvst:
      dets_table = ev.evaluate_vs_t(None,None,force=args.force)
      #dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force)
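      # comm_rank is presumably the MPI rank; only the root process (rank 0)
      # collects the result tables.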
      if comm_rank==0:
        dets_tables.append(dets_table)
        #dets_tables_whole.append(dets_table_whole)
        #clses_tables_whole.append(clses_table_whole)

    # optionally, evaluate in the standard PR regime
    if args.wholeset_prs:
      ev.evaluate_detections_whole(None,force=args.force)

  # and plot the comparison if multiple config files were given
  if not args.no_apvst and len(configs)>1 and comm_rank==0:
    # filename of the final plot is the config file name
    dirname = config.get_evals_dir(dataset.get_name())
    filename = args.config
    if args.inverse_prior:
      filename += '_inverse_prior'
    
    # det avg
    ff = opjoin(dirname, '%s_det_avg.png'%filename)
    ff_nl = opjoin(dirname, '%s_det_avg_nl.png'%filename)

    # make sure directory exists
    ut.makedirs(os.path.dirname(ff))
    
    Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
    Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

    if False:
      # det whole
      ff = opjoin(dirname, '%s_det_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_det_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

      # cls whole
      ff = opjoin(dirname, '%s_cls_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_cls_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)
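A hypothetical invocation, assuming the script above is saved as run_experiment.py (the filename is an assumption; all flags come from the argparse definitions above):

    # Run on the val set, restricted to the first 100 images, with time
    # bounds [0,10], forcing recomputation of cached results.
    python run_experiment.py --test_dataset val --first_n 100 --bounds10 --force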