Code example #1
File: dataset.py Project: raldam/timely
import numpy as np
# Dataset and Table are project classes from raldam/timely; the exact import
# paths are assumed here and may differ:
# from synthetic.dataset import Dataset
# from synthetic.table import Table
class TestDatasetJson:
    def setup(self):
        self.d = Dataset('test_data1', force=True)
        self.classes = ["A", "B", "C"]

    def test_load(self):
        assert (self.d.num_images() == 4)
        assert (self.d.classes == self.classes)

    def test_get_det_gt(self):
        gt = self.d.get_det_gt(with_diff=True, with_trun=False)
        df = Table(
            np.array([[0., 0., 0., 0., 0., 0, 0, 0.],
                      [1., 1., 1., 1., 1., 0, 0, 0.],
                      [1., 1., 1., 0., 0., 0, 0, 1.],
                      [0., 0., 0., 0., 1., 0, 0, 2.],
                      [0., 0., 0., 0., 2., 0, 0, 3.],
                      [1., 1., 1., 1., 2., 0, 0, 3.]]),
            ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind'])
        print(gt)
        print(df)
        assert (gt == df)

    def test_get_cls_counts_json(self):
        arr = np.array([[1, 1, 0], [1, 0, 0], [0, 1, 0], [0, 0, 2]])
        print(self.d.get_cls_counts())
        assert (np.all(self.d.get_cls_counts() == arr))

    def test_get_cls_ground_truth_json(self):
        table = Table(
            np.array([[True, True, False], [True, False, False],
                      [False, True, False], [False, False, True]]),
            ["A", "B", "C"])
        assert (self.d.get_cls_ground_truth() == table)

    def test_det_ground_truth_for_class_json(self):
        gt = self.d.get_det_gt_for_class("A", with_diff=True, with_trun=True)
        arr = np.array([[0., 0., 0., 0., 0., 0., 0, 0.],
                        [1., 1., 1., 0., 0., 0., 0., 1.]])
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']
        print(gt.arr)
        assert (np.all(gt.arr == arr))
        assert (gt.cols == cols)

        # no diff or trun
        gt = self.d.get_det_gt_for_class("A", with_diff=False, with_trun=False)
        arr = np.array([[0., 0., 0., 0., 0., 0., 0, 0.],
                        [1., 1., 1., 0., 0., 0., 0., 1.]])
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']
        print(gt.arr)
        assert (np.all(gt.arr == arr))
        assert (gt.cols == cols)

    def test_set_values(self):
        assert (np.all(self.d.values == 1 / 3. * np.ones(len(self.classes))))
        self.d.set_values('uniform')
        assert (np.all(self.d.values == 1 / 3. * np.ones(len(self.classes))))
        self.d.set_values('inverse_prior')
        print(self.d.values)
        assert (np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
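The tests above exercise only a small surface of the project's Table class. As a reference, here is a minimal sketch of the interface they rely on (arr, cols, equality, shape, and the append_column method used in the next example); the real implementation in raldam/timely may differ:

import numpy as np

class Table:
    # Minimal stand-in: a 2-D numpy array plus ordered column names.
    def __init__(self, arr, cols):
        self.arr = np.atleast_2d(arr)
        self.cols = list(cols)

    @property
    def shape(self):
        return self.arr.shape

    def __eq__(self, other):
        # Equal when both the column names and every cell match.
        return self.cols == other.cols and np.array_equal(self.arr, other.arr)

    def append_column(self, name, values):
        # Return a new Table with `values` appended as the rightmost column.
        new_arr = np.hstack((self.arr, np.asarray(values).reshape(-1, 1)))
        return Table(new_arr, self.cols + [name])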
Code example #2
File: evaluation.py Project: raldam/timely
import numpy as np
# Project-internal imports (paths assumed, may differ in raldam/timely):
# from synthetic.dataset import Dataset
# from synthetic.dataset_policy import DatasetPolicy
# from synthetic.evaluation import Evaluation
# import synthetic.util as ut
class TestEvaluationSynthetic:
    def __init__(self):
        self.d = Dataset('test_data2', force=True)
        self.classes = ["A", "B", "C"]
        self.det_gt = self.d.get_det_gt()

    def test(self):
        scores = np.ones(self.det_gt.shape[0])
        dets = self.det_gt.append_column('score', scores)

        scores = np.ones(self.d.get_det_gt_for_class('A').shape[0])
        dets_just_A = self.d.get_det_gt_for_class('A')
        dets_just_A = dets_just_A.append_column('score', scores)

        self.d.set_values('uniform')
        assert (np.all(self.d.values == 1. / 3 * np.ones(len(self.classes))))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.33333333333333))

        self.d.set_values('inverse_prior')
        assert (np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.25))
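The two asserted mAP values follow directly from the class values, assuming (as this sketch does) that compute_det_map reduces per-class APs by a class-value-weighted mean. Perfect detections for class "A" alone give AP 1 for A and AP 0 for B and C:

import numpy as np

per_class_ap = np.array([1.0, 0.0, 0.0])     # perfect detections for "A" only
uniform = np.ones(3) / 3.0                   # after set_values('uniform')
inverse_prior = np.array([0.25, 0.25, 0.5])  # after set_values('inverse_prior')
print(per_class_ap.dot(uniform))             # 0.3333..., matching ut.fequal(ap, 0.33333333333333)
print(per_class_ap.dot(inverse_prior))       # 0.25, matching ut.fequal(ap, 0.25)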
Code example #3
File: run_experiment.py Project: raldam/timely
import argparse
import os
# opjoin is presumably an alias for os.path.join defined in the project.
# Dataset, DatasetPolicy, Evaluation, load_configs, config, ut, and comm_rank
# (an MPI rank) come from the raldam/timely codebase; their import paths are
# not shown in this excerpt.
def main():
    parser = argparse.ArgumentParser(description="Run experiments with the timely detection system.")

    parser.add_argument(
        "--test_dataset",
        choices=["val", "test", "trainval"],
        default="val",
        help="""Dataset to use for testing. Run on val until final runs.
    The training dataset is inferred (val->train; test->trainval; trainval->trainval).""",
    )

    parser.add_argument("--first_n", type=int, help="only take the first N images in the test dataset")

    parser.add_argument("--first_n_train", type=int, help="only take the first N images in the train dataset")

    parser.add_argument(
        "--config",
        help="""Config file name that specifies the experiments to run.
    Give name such that the file is configs/#{name}.json or configs/#{name}/
    In the latter case, all files within the directory will be loaded.""",
    )

    parser.add_argument("--suffix", help="Overwrites the suffix in the config(s).")

    parser.add_argument("--bounds10", action="store_true", default=False, help="set bounds to [0,10]")

    parser.add_argument("--bounds515", action="store_true", default=False, help="set bounds to [5,15]")

    parser.add_argument("--force", action="store_true", default=False, help="force overwrite")

    parser.add_argument("--wholeset_prs", action="store_true", default=False, help="evaluate in the final p-r regime")

    parser.add_argument(
        "--no_apvst", action="store_true", default=False, help="do NOT evaluate in the ap vs. time regime"
    )

    parser.add_argument(
        "--det_configs", action="store_true", default=False, help="output detector statistics to det_configs"
    )

    parser.add_argument("--inverse_prior", action="store_true", default=False, help="use inverse prior class values")

    args = parser.parse_args()
    print(args)

    # If config file is not given, just run one experiment using default config
    if not args.config:
        configs = [DatasetPolicy.default_config]
    else:
        configs = load_configs(args.config)

    # Load the dataset
    dataset = Dataset("full_pascal_" + args.test_dataset)
    if args.first_n:
        dataset.images = dataset.images[: args.first_n]

    # Infer train_dataset
    if args.test_dataset == "test":
        train_dataset = Dataset("full_pascal_trainval")
    elif args.test_dataset == "val":
        train_dataset = Dataset("full_pascal_train")
    elif args.test_dataset == "trainval":
        train_dataset = Dataset("full_pascal_trainval")
    else:
        pass  # unreachable: argparse restricts --test_dataset to the choices above

    # Only need to set training dataset values; evaluation gets it from there
    if args.inverse_prior:
        train_dataset.set_values("inverse_prior")

    # TODO: hack
    if args.first_n_train:
        train_dataset.images = train_dataset.images[: args.first_n_train]

    # In both the above cases, we use the val dataset for weights
    weights_dataset_name = "full_pascal_val"

    dets_tables = []
    dets_tables_whole = []
    clses_tables_whole = []
    all_bounds = []

    plot_infos = []
    for config_f in configs:
        if args.suffix:
            config_f["suffix"] = args.suffix
        if args.bounds10:
            config_f["bounds"] = [0, 10]
        if args.bounds515:
            config_f["bounds"] = [5, 15]
        assert not (args.bounds10 and args.bounds515)
        if args.inverse_prior:
            config_f["suffix"] += "_inverse_prior"
            config_f["values"] = "inverse_prior"

        dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f)
        ev = Evaluation(dp)
        all_bounds.append(dp.bounds)
        plot_infos.append(dict((k, config_f[k]) for k in ("label", "line", "color") if k in config_f))
        # output the det configs first
        if args.det_configs:
            dp.output_det_statistics()

        # evaluate in the AP vs. Time regime, unless told not to
        if not args.no_apvst:
            dets_table = ev.evaluate_vs_t(None, None, force=args.force)
            # dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force)
            if comm_rank == 0:
                dets_tables.append(dets_table)
                # dets_tables_whole.append(dets_table_whole)
                # clses_tables_whole.append(clses_table_whole)

        # optionally, evaluate in the standard PR regime
        if args.wholeset_prs:
            ev.evaluate_detections_whole(None, force=args.force)

    # and plot the comparison if multiple config files were given
    if not args.no_apvst and len(configs) > 1 and comm_rank == 0:
        # filename of the final plot is the config file name
        dirname = config.get_evals_dir(dataset.get_name())
        filename = args.config
        if args.inverse_prior:
            filename += "_inverse_prior"

        # det avg
        ff = opjoin(dirname, "%s_det_avg.png" % filename)
        ff_nl = opjoin(dirname, "%s_det_avg_nl.png" % filename)

        # make sure directory exists
        ut.makedirs(os.path.dirname(ff))

        Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
        Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

        if False:
            # det whole
            ff = opjoin(dirname, "%s_det_whole.png" % filename)
            ff_nl = opjoin(dirname, "%s_det_whole_nl.png" % filename)
            Evaluation.plot_ap_vs_t(
                dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos
            )
            Evaluation.plot_ap_vs_t(
                dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos
            )

            # cls whole
            ff = opjoin(dirname, "%s_cls_whole.png" % filename)
            ff_nl = opjoin(dirname, "%s_cls_whole_nl.png" % filename)
            Evaluation.plot_ap_vs_t(
                clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos
            )
            Evaluation.plot_ap_vs_t(
                clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos
            )
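load_configs itself is not part of this excerpt. Based on the --config help text above (either a configs/#{name}.json file or a configs/#{name}/ directory whose JSON files are all loaded), a minimal sketch could look like the following; the project's real implementation may differ:

import json
import os
from glob import glob

def load_configs(name, configs_dir='configs'):
    # Load configs/<name>.json, or every *.json file under configs/<name>/.
    path = os.path.join(configs_dir, name)
    if os.path.isdir(path):
        filenames = sorted(glob(os.path.join(path, '*.json')))
    else:
        filenames = [path + '.json']
    configs = []
    for filename in filenames:
        with open(filename) as f:
            configs.append(json.load(f))
    return configs

Each loaded dict may carry the keys the loop above reads: 'suffix', 'bounds', 'values', and the optional plotting keys 'label', 'line', and 'color'.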