def test_learn_weights(self):
    dataset = Dataset("full_pascal_val")
    train_dataset = Dataset("full_pascal_train")
    dataset.images = dataset.images[:20]
    train_dataset.images = train_dataset.images[:20]
    dp = DatasetPolicy(dataset, train_dataset, self.weights_dataset_name, **self.config)
    weights = dp.learn_weights()

def __init__(self):
    self.d = Dataset('full_pascal_trainval')
    self.d_val = Dataset('full_pascal_test')
    self.cls = 'dog'
    suffix = 'default'
    self.csc = CSCClassifier(suffix, self.cls, self.d, self.d_val)
    csc_test = np.load(config.get_ext_dets_filename(self.d, 'csc_default'))
    self.dets = csc_test[()]
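The csc_test[()] indexing above is the standard NumPy idiom for recovering a pickled Python object (here, the detections table) from the 0-d object array that np.save produces when given a non-array object. A minimal, self-contained illustration of just that idiom, using a plain dict as a stand-in for the project's Table:

import numpy as np

# np.save wraps an arbitrary Python object in a 0-d object array.
np.save('tmp_dets.npy', {'score': [0.9, 0.1]})
loaded = np.load('tmp_dets.npy', allow_pickle=True)  # allow_pickle is required on recent NumPy
print(loaded.shape)   # () -- a 0-d array
dets = loaded[()]     # empty-tuple indexing unwraps the original object
print(dets['score'])  # [0.9, 0.1]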
Example 3
def test_ground_truth_test(self):
    d = Dataset(test_config, 'test_pascal_val')
    d.load_from_pascal('val')
    gt = d.get_det_gt(with_diff=False, with_trun=False)
    correct = np.matrix(
        [[139., 200., 69., 102., 18., 0., 0., 0.],
         [123., 155., 93., 41., 17., 0., 0., 1.],
         [239., 156., 69., 50., 8., 0., 0., 1.]])
    print(gt)
    assert np.all(gt.arr == correct)
Example 4
def __init__(self):
    self.dataset = Dataset('test_pascal_val')
    self.train_dataset = Dataset('test_pascal_train')
    self.weights_dataset_name = 'test_pascal_val'
    self.config = {
        'suffix': 'default',
        'detectors': ['perfect'],  # perfect,perfect_with_noise,dpm,csc_default,csc_half
        'policy_mode': 'random',
        'bounds': None,
        'weights_mode': 'manual_1'  # manual_1, manual_2, manual_3, greedy, rl
    }
    self.dp = DatasetPolicy(self.dataset, self.train_dataset,
                            self.weights_dataset_name, **self.config)
def __init__(self):
    self.dataset = Dataset("test_pascal_val")
    self.train_dataset = Dataset("test_pascal_train")
    self.weights_dataset_name = "test_pascal_val"
    self.config = {
        "suffix": "default",
        "detectors": ["perfect"],  # perfect,perfect_with_noise,dpm,csc_default,csc_half
        "policy_mode": "random",
        "bounds": None,
        "weights_mode": "manual_1",  # manual_1, manual_2, manual_3, greedy, rl
    }
    self.dp = DatasetPolicy(self.dataset, self.train_dataset, self.weights_dataset_name, **self.config)
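The inline comments above enumerate the other accepted values for "detectors" and "weights_mode". As a hedged sketch (not taken from the original tests), a variant configuration exercising the DPM detector with greedily learned weights and the [0, 10] bounds used by the runner script further down might look like:

# Hypothetical variant; keys and alternative values come from the comments above.
config_variant = {
    "suffix": "dpm_greedy",    # assumption: any suffix string is accepted
    "detectors": ["dpm"],      # perfect, perfect_with_noise, dpm, csc_default, csc_half
    "policy_mode": "random",
    "bounds": [0, 10],         # the --bounds10 setting used in main() below
    "weights_mode": "greedy",  # manual_1, manual_2, manual_3, greedy, rl
}
dp = DatasetPolicy(Dataset("test_pascal_val"), Dataset("test_pascal_train"),
                   "test_pascal_val", **config_variant)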
Example 6
def test_learn_weights(self):
    dataset = Dataset('full_pascal_val')
    train_dataset = Dataset('full_pascal_train')
    dataset.images = dataset.images[:20]
    train_dataset.images = train_dataset.images[:20]
    dp = DatasetPolicy(dataset, train_dataset, self.weights_dataset_name,
                       **self.config)
    weights = dp.learn_weights()
Example 7
    def setUp(self):
        self.d = Dataset(test_config, 'test_data2')
        self.d.load_from_json(test_data2)
        self.classes = self.d.classes

        det_gt = self.d.get_det_gt()

        # perfect detections
        scores = np.ones(det_gt.shape[0])
        self.full_dets = det_gt.append_column('score', scores)

        # perfect detections, but only for class 'A'
        dets_just_A = self.d.get_det_gt_for_class('A')
        scores = np.ones(dets_just_A.shape[0])
        self.partial_dets = dets_just_A.append_column('score', scores)
Example 8
class TestDatasetPascal:
    def setup(self):
        self.d = Dataset(test_config, 'test_pascal_train')
        self.d.load_from_pascal('train')

    def test_gt(self):
        assert(self.d.num_classes() == 20)
        assert('dog' in self.d.classes)

    def test_gt_for_class(self):
        correct = np.array(
            [[48., 240., 148., 132., 11., 0., 1., 0.]])
        ans = self.d.get_det_gt_for_class("dog")
        print(ans)
        assert np.all(ans.arr == correct)

    def test_neg_samples(self):
        # unlimited negative examples
        indices = self.d.get_neg_samples_for_class(
            "dog", with_diff=True, with_trun=True)
        correct = np.array([1, 2])
        assert(np.all(indices == correct))

        # maximum 1 negative example
        indices = self.d.get_neg_samples_for_class(
            "dog", 1, with_diff=True, with_trun=True)
        correct1 = np.array([1])
        correct2 = np.array([2])
        print(indices)
        assert(np.all(indices == correct1) or np.all(indices == correct2))

    def test_pos_samples(self):
        indices = self.d.get_pos_samples_for_class("dog")
        correct = np.array([0])
        assert(np.all(indices == correct))

    def test_ground_truth_test(self):
        d = Dataset(test_config, 'test_pascal_val')
        d.load_from_pascal('val')
        gt = d.get_det_gt(with_diff=False, with_trun=False)
        correct = np.matrix(
            [[139., 200., 69., 102., 18., 0., 0., 0.],
             [123., 155., 93., 41., 17., 0., 0., 1.],
             [239., 156., 69., 50., 8., 0., 0., 1.]])
        print(gt)
        assert np.all(gt.arr == correct)

    def test_get_pos_windows(self):
        pass
Example 9
def test():
    dataset = Dataset('full_pascal_trainval')
    fm = FastinfModel(dataset, 'perfect', 20)
    # NOTE: just took values from a run of the thing

    prior_correct = [
        float(x) for x in
        "0.050543  0.053053  0.073697  0.038331  0.050954  0.041879  0.16149\
    0.068721  0.10296   0.026837  0.043779  0.087683  0.063447  0.052205\
    0.41049   0.051664  0.014211  0.068361  0.056969  0.05046".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, prior_correct, 4)

    observations = np.zeros(20)
    taken = np.zeros(20)
    fm.update_with_observations(taken, observations)
    np.testing.assert_almost_equal(fm.p_c, prior_correct, 4)
    observations[5] = 1
    taken[5] = 1
    fm.update_with_observations(taken, observations)
    print(fm.p_c)
    correct = [
        float(x) for x in
        "0.027355   0.11855    0.027593   0.026851   0.012569   0.98999    0.52232\
    0.017783   0.010806   0.015199   0.0044641  0.02389    0.033602   0.089089\
    0.50297    0.0083272  0.0088274  0.0098522  0.034259   0.0086298".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, correct, 4)
    observations[15] = 0
    taken[15] = 1
    fm.update_with_observations(taken, observations)
    correct = [
        float(x) for x in
        "2.73590000e-02   1.19030000e-01   2.75500000e-02   2.68760000e-02 \
   1.23920000e-02   9.90200000e-01   5.25320000e-01   1.76120000e-02 \
   1.05030000e-02   1.52130000e-02   4.26410000e-03   2.38250000e-02 \
   3.36870000e-02   8.96450000e-02   5.04300000e-01   8.71880000e-05 \
   8.82630000e-03   9.55290000e-03   3.43240000e-02   8.44510000e-03".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, correct)

    # reinit_marginals
    fm.reset()
    np.testing.assert_equal(fm.p_c, prior_correct)

    print(fm.cache)
Example 10
def simply_run_it(dataset):
  parser = argparse.ArgumentParser(
    description="Run fastInf experiments.")

  parser.add_argument('-m',type=int,
    default=0,
    choices=[0,1,2,3,4,5],
    help="""optimization method 0-FR, 1-PR, 2-BFGS, 3-STEEP, 4-NEWTON, 5-GRADIENT (0).""")

  parser.add_argument('-r',type=int,
    default=1,
    help="""parameter of L1 regularization.""")
  
  args = parser.parse_args()
  
  m = args.m
  r = args.r
  d = Dataset(dataset)
  suffixs = ['CSC_regions']
  run_fastinf_different_settings(d, [m], [r], suffixs)
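For reference, a usage sketch: the function reads its options from the command line, so invoking the surrounding script (its filename is assumed here) with -m 2 -r 1 selects BFGS optimization and an L1 regularization parameter of 1. The equivalent direct call, bypassing argparse, is what simply_run_it ends up doing:

# Hypothetical invocation of the script containing simply_run_it (script name assumed):
#   python run_fastinf.py -m 2 -r 1
# Sketch of the equivalent direct call with those flag values:
d = Dataset('full_pascal_trainval')
run_fastinf_different_settings(d, [2], [1], ['CSC_regions'])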
Example 11
class TestDatasetPolicy:
    def __init__(self):
        self.dataset = Dataset('test_pascal_val')
        self.train_dataset = Dataset('test_pascal_train')
        self.weights_dataset_name = 'test_pascal_val'
        self.config = {
            'suffix': 'default',
            'detectors':
            ['perfect'],  # perfect,perfect_with_noise,dpm,csc_default,csc_half
            'policy_mode': 'random',
            'bounds': None,
            'weights_mode':
            'manual_1'  # manual_1, manual_2, manual_3, greedy, rl
        }
        self.dp = DatasetPolicy(self.dataset, self.train_dataset,
                                self.weights_dataset_name, **self.config)

    def test_run_on_dataset(self):
        # run on test dataset
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        assert (len(samples) == clses.shape[0])
        assert (len(samples) == self.dp.dataset.num_images() *
                len(self.dp.actions))
        train_dets, train_clses, train_samples = self.dp.run_on_dataset(
            train=True, force=True)
        assert (len(train_samples) == train_clses.shape[0])
        assert (len(train_samples) == self.dp.train_dataset.num_images() *
                len(self.dp.actions))

    def test_unique_samples(self):
        "Test the correctness of making a list of samples unique."
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        new_sample = copy.deepcopy(samples[11])
        new_sample2 = copy.deepcopy(samples[11])
        new_sample2.dt = -40  # an unreasonable value
        assert (new_sample in samples)
        assert (new_sample2 not in samples)

    def test_output_det_statistics(self):
        self.dp.output_det_statistics()

    def test_learn_weights(self):
        dataset = Dataset('full_pascal_val')
        train_dataset = Dataset('full_pascal_train')
        dataset.images = dataset.images[:20]
        train_dataset.images = train_dataset.images[:20]
        dp = DatasetPolicy(dataset, train_dataset, self.weights_dataset_name,
                           **self.config)
        weights = dp.learn_weights()

    def test_regress(self):
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        weights, error = self.dp.regress(samples, 'greedy')
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)
        samples += samples
        weights, error = self.dp.regress(samples, 'greedy')
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)
        samples += samples
        weights, error = self.dp.regress(samples, 'greedy')
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)

    def test_load_weights(self):
        modes = ['manual_1', 'manual_2', 'manual_3']
        for mode in modes:
            print "%s weights:" % mode
            self.dp.weights_mode = mode
            self.dp.load_weights()
            print self.dp.get_reshaped_weights()
            assert (self.dp.weights.shape[0] == len(self.dp.actions) *
                    BeliefState.num_features)
            self.dp.write_weights_image('temp_weights_%s.png' % mode)

    def test_perfect_detector(self):
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        #embed()
        dets = dets.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])
        gt = self.dataset.get_det_gt()
        gt = gt.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])

        # TODO: does this make sense?
        dets.sort_by_column('x')
        gt.sort_by_column('x')
        print(dets)
        print(gt)
        assert (dets == gt)

    def test_load_dpm_detections(self):
        conf = dict(self.config)
        conf['detectors'] = ['dpm']
        policy = DatasetPolicy(self.dataset, self.train_dataset, **conf)
        assert (policy.detectors == ['dpm'])
        dets = policy.load_ext_detections(self.dataset,
                                          'dpm_may25',
                                          force=True)
        dets = dets.with_column_omitted('time')

        # load the ground truth dets, processed in Matlab
        # (timely/data/test_support/concat_dets.m)
        filename = os.path.join(config.test_support_dir, 'val_dets.mat')
        dets_correct = Table(
            scipy.io.loadmat(filename)['dets'], [
                'x1', 'y1', 'x2', 'y2', 'dummy', 'dummy', 'dummy', 'dummy',
                'score', 'cls_ind', 'img_ind'
            ], 'dets_correct')
        dets_correct = dets_correct.subset(
            ['x1', 'y1', 'x2', 'y2', 'score', 'cls_ind', 'img_ind'])
        dets_correct.arr[:, :4] -= 1
        dets_correct.arr[:, :4] = BoundingBox.convert_arr_from_corners(
            dets_correct.arr[:, :4])
        dets_correct.cols = ['x', 'y', 'w', 'h', 'score', 'cls_ind', 'img_ind']

        print('----mine:')
        print(dets)
        print('----correct:')
        print(dets_correct)
        assert (dets_correct == dets)
Example 12
class TestDatasetJson(object):
    def setup(self):
        self.d = Dataset(test_config, 'test_data1')
        self.d.load_from_json(test_data1)
        self.classes = ["A", "B", "C"]

    def test_load(self):
        assert(self.d.num_images() == 4)
        assert(self.d.classes == self.classes)

    def test_get_det_gt(self):
        gt = self.d.get_det_gt(with_diff=True, with_trun=False)
        df = Table(
            np.array([[0., 0., 0., 0., 0., 0, 0, 0.],
                      [1., 1., 1., 1., 1., 0, 0, 0.],
                      [1., 1., 1., 0., 0., 0, 0, 1.],
                      [0., 0., 0., 0., 1., 0, 0, 2.],
                      [0., 0., 0., 0., 2., 0, 0, 3.],
                      [1., 1., 1., 1., 2., 0, 0, 3.]]),
            ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind'])
        print(gt)
        print(df)
        assert(gt == df)

    def test_get_cls_counts(self):
        arr = np.array(
            [[1, 1, 0],
                [1, 0, 0],
                [0, 1, 0],
                [0, 0, 2]])
        print(self.d.get_cls_counts())
        assert(np.all(self.d.get_cls_counts() == arr))

    def test_get_cls_ground_truth(self):
        table = Table(
            np.array([[True, True, False],
                     [True, False, False],
                     [False, True, False],
                     [False, False, True]]), ["A", "B", "C"])
        assert(self.d.get_cls_ground_truth() == table)

    def test_det_ground_truth_for_class(self):
        gt = self.d.get_det_gt_for_class("A", with_diff=True, with_trun=True)
        arr = np.array(
            [[0., 0., 0., 0., 0., 0., 0, 0.],
             [1., 1., 1., 0., 0., 0., 0., 1.]])
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']
        print(gt.arr)
        assert(np.all(gt.arr == arr))
        assert(gt.cols == cols)

        # no diff or trun
        gt = self.d.get_det_gt_for_class("A", with_diff=False, with_trun=False)
        arr = np.array(
            [[0., 0., 0., 0., 0., 0., 0, 0.],
             [1., 1., 1., 0., 0., 0., 0., 1.]])
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']
        print(gt.arr)
        assert(np.all(gt.arr == arr))
        assert(gt.cols == cols)

    def test_set_class_values(self):
        assert(np.all(self.d.values == 1 / 3. * np.ones(len(self.classes))))
        self.d.set_class_values('uniform')
        assert(np.all(self.d.values == 1 / 3. * np.ones(len(self.classes))))
        self.d.set_class_values('inverse_prior')
        print(self.d.values)
        assert(np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
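The expected values follow from the class presence counts in test_data1: A and B each occur in two of the four images and C in one, so weighting classes by the inverse of their image frequency and normalizing gives [0.25, 0.25, 0.5]. A minimal sketch of that arithmetic (assuming this is what set_class_values('inverse_prior') computes; only the counts and the expected result are taken from the tests):

import numpy as np

presence_counts = np.array([2., 2., 1.])  # images containing A, B, C in test_data1
inverse = 1. / presence_counts
values = inverse / inverse.sum()
print(values)  # [0.25 0.25 0.5], matching the assertion above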
Example 13
def setup(self):
    self.d = Dataset(test_config, 'test_data1')
    self.d.load_from_json(test_data1)
    self.classes = ["A", "B", "C"]

class TestDatasetPolicy:
    def __init__(self):
        self.dataset = Dataset("test_pascal_val")
        self.train_dataset = Dataset("test_pascal_train")
        self.weights_dataset_name = "test_pascal_val"
        self.config = {
            "suffix": "default",
            "detectors": ["perfect"],  # perfect,perfect_with_noise,dpm,csc_default,csc_half
            "policy_mode": "random",
            "bounds": None,
            "weights_mode": "manual_1",  # manual_1, manual_2, manual_3, greedy, rl
        }
        self.dp = DatasetPolicy(self.dataset, self.train_dataset, self.weights_dataset_name, **self.config)

    def test_run_on_dataset(self):
        # run on test dataset
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        assert len(samples) == clses.shape[0]
        assert len(samples) == self.dp.dataset.num_images() * len(self.dp.actions)
        train_dets, train_clses, train_samples = self.dp.run_on_dataset(train=True, force=True)
        assert len(train_samples) == train_clses.shape[0]
        assert len(train_samples) == self.dp.train_dataset.num_images() * len(self.dp.actions)

    def test_unique_samples(self):
        "Test the correctness of making a list of samples unique."
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        new_sample = copy.deepcopy(samples[11])
        new_sample2 = copy.deepcopy(samples[11])
        new_sample2.dt = -40  # an unreasonable value
        assert new_sample in samples
        assert new_sample2 not in samples

    def test_output_det_statistics(self):
        self.dp.output_det_statistics()

    def test_learn_weights(self):
        dataset = Dataset("full_pascal_val")
        train_dataset = Dataset("full_pascal_train")
        dataset.images = dataset.images[:20]
        train_dataset.images = train_dataset.images[:20]
        dp = DatasetPolicy(dataset, train_dataset, self.weights_dataset_name, **self.config)
        weights = dp.learn_weights()

    def test_regress(self):
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        weights, error = self.dp.regress(samples, "greedy")
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)
        samples += samples
        weights, error = self.dp.regress(samples, "greedy")
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)
        samples += samples
        weights, error = self.dp.regress(samples, "greedy")
        print "Weights after %d samples:\n %s" % (len(samples), weights)
        print "Error after %d samples: %s" % (len(samples), error)

    def test_load_weights(self):
        modes = ["manual_1", "manual_2", "manual_3"]
        for mode in modes:
            print "%s weights:" % mode
            self.dp.weights_mode = mode
            self.dp.load_weights()
            print self.dp.get_reshaped_weights()
            assert self.dp.weights.shape[0] == len(self.dp.actions) * BeliefState.num_features
            self.dp.write_weights_image("temp_weights_%s.png" % mode)

    def test_perfect_detector(self):
        dets, clses, samples = self.dp.run_on_dataset(force=True)
        # embed()
        dets = dets.subset(["x", "y", "w", "h", "cls_ind", "img_ind"])
        gt = self.dataset.get_det_gt()
        gt = gt.subset(["x", "y", "w", "h", "cls_ind", "img_ind"])

        # TODO: does this make sense?
        dets.sort_by_column("x")
        gt.sort_by_column("x")
        print(dets)
        print(gt)
        assert dets == gt

    def test_load_dpm_detections(self):
        conf = dict(self.config)
        conf["detectors"] = ["dpm"]
        policy = DatasetPolicy(self.dataset, self.train_dataset, **conf)
        assert policy.detectors == ["dpm"]
        dets = policy.load_ext_detections(self.dataset, "dpm_may25", force=True)
        dets = dets.with_column_omitted("time")

        # load the ground truth dets, processed in Matlab
        # (timely/data/test_support/concat_dets.m)
        filename = os.path.join(config.test_support_dir, "val_dets.mat")
        dets_correct = Table(
            scipy.io.loadmat(filename)["dets"],
            ["x1", "y1", "x2", "y2", "dummy", "dummy", "dummy", "dummy", "score", "cls_ind", "img_ind"],
            "dets_correct",
        )
        dets_correct = dets_correct.subset(["x1", "y1", "x2", "y2", "score", "cls_ind", "img_ind"])
        dets_correct.arr[:, :4] -= 1
        dets_correct.arr[:, :4] = BoundingBox.convert_arr_from_corners(dets_correct.arr[:, :4])
        dets_correct.cols = ["x", "y", "w", "h", "score", "cls_ind", "img_ind"]

        print ("----mine:")
        print (dets)
        print ("----correct:")
        print (dets_correct)
        assert dets_correct == dets
    print(table)
    cPickle.dump(table, open('table', 'w'))
    print('saved')
  return table

def conv(d_train, table_arr):
  table = Table()
  #table_arr = cPickle.load(open('table_linear_5','r'))
  table.arr = np.hstack((table_arr, np.array(np.arange(table_arr.shape[0]),ndmin=2).T))
  table.cols = d_train.classes + ['img_ind']
  print(table)
  #cPickle.dump(table, open('tab_linear_5','w'))
  return table
  
if __name__=='__main__':
  d_train = Dataset('full_pascal_trainval')
  d_val = Dataset('full_pascal_val')

  train_gt = d_train.get_cls_ground_truth()
  val_gt = d_val.get_cls_ground_truth()

  if mpi.comm_rank == 0:
    filename = os.path.join(config.get_classifier_dataset_dirname(CSCClassifier('default','dog', d_train, d_val), d_train),'crossval.txt')
  
  kernels =  ['linear']
  Cs = [50]
  
  settings = list(itertools.product(kernels, Cs))
  
  for setin in settings:
    kernel = setin[0]
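With a single kernel and a single C, itertools.product yields exactly one setting; a quick illustration of what the loop above iterates over:

import itertools

kernels = ['linear']
Cs = [50]
settings = list(itertools.product(kernels, Cs))
print(settings)          # [('linear', 50)]
kernel, C = settings[0]  # kernel == 'linear', C == 50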
Example 16
    return table


def conv(d_train, table_arr):
    table = Table()
    #table_arr = cPickle.load(open('table_linear_5','r'))
    table.arr = np.hstack(
        (table_arr, np.array(np.arange(table_arr.shape[0]), ndmin=2).T))
    table.cols = d_train.classes + ['img_ind']
    print(table)
    #cPickle.dump(table, open('tab_linear_5','w'))
    return table


if __name__ == '__main__':
    d_train = Dataset('full_pascal_trainval')
    d_val = Dataset('full_pascal_val')

    train_gt = d_train.get_cls_ground_truth()
    val_gt = d_val.get_cls_ground_truth()

    if mpi.comm_rank == 0:
        filename = os.path.join(
            config.get_classifier_dataset_dirname(
                CSCClassifier('default', 'dog', d_train, d_val), d_train),
            'crossval.txt')

    kernels = ['linear']
    Cs = [50]

    settings = list(itertools.product(kernels, Cs))
Example 17
      return np.zeros((1,intervals+1))
    dpm = feats.subset(['score', 'cls_ind', 'img_ind'])
    img_dpm = dpm.filter_on_column('img_ind', img, omit=True)
    if img_dpm.arr.size == 0:
      print('empty vector')
      return np.zeros((1,intervals+1))
    cls_dpm = img_dpm.filter_on_column('cls_ind', cls, omit=True)
    hist = self.compute_histogram(cls_dpm.arr, intervals, lower, upper)
    vector = np.zeros((1, intervals+1))
    vector[0,0:-1] = hist
    vector[0,-1] = img_dpm.shape[0]
    return vector
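For context, a guess at what compute_histogram does above, since its definition is not shown: bin the class-specific DPM scores into `intervals` equal-width bins on [lower, upper]. Everything below other than that bin layout (in particular the normalization) is an assumption, not confirmed by the snippet:

import numpy as np

def compute_histogram_sketch(arr, intervals, lower, upper):
    # arr is the filtered table array; 'score' is its first column.
    scores = arr[:, 0]
    hist, _ = np.histogram(scores, bins=intervals, range=(lower, upper))
    hist = hist.astype(float)
    if hist.sum() > 0:
        hist /= hist.sum()  # normalization is a guess
    return hist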
  
if __name__=='__main__':
  train_set = 'full_pascal_train'
  train_dataset = Dataset(train_set)  
  dpm_dir = os.path.join(config.res_dir, 'dpm_dets')
  filename = os.path.join(dpm_dir, train_set + '_dets_all_may25_DP.npy')
  dpm_train = np.load(filename)
  dpm_train = dpm_train[()]  
  dpm_train = dpm_train.subset(['score', 'cls_ind', 'img_ind'])
  dpm_classif = DPMClassifier()
  dpm_train.arr = dpm_classif.normalize_dpm_scores(dpm_train.arr)
  
  val_set = 'full_pascal_val'
  test_dataset = Dataset(val_set)  
  dpm_test_dir = os.path.join(config.res_dir, 'dpm_dets')
  filename = os.path.join(dpm_dir, val_set + '_dets_all_may25_DP.npy')
  dpm_test = np.load(filename)
  dpm_test = dpm_test[()]  
  dpm_test = dpm_test.subset(['score', 'cls_ind', 'img_ind'])
Example 18
"""
Runner script to output cooccurrence statistics for the synthetic
and PASCAL datasets.
"""

from skvisutils import Dataset

datasets = [
  'synthetic',
  'full_pascal_train','full_pascal_trainval',
  'full_pascal_val','full_pascal_test']

for dataset in datasets:
  d = Dataset(dataset) 
  f = d.plot_coocurrence()
  f = d.plot_coocurrence(second_order=True)
  f = d.plot_distribution()
Example 19
def setup(self):
    self.d = Dataset(test_config, 'test_pascal_train')
    self.d.load_from_pascal('train')
Example 20
class TestEvaluationSynthetic(unittest.TestCase):
    def setUp(self):
        self.d = Dataset(test_config, 'test_data2')
        self.d.load_from_json(test_data2)
        self.classes = self.d.classes

        det_gt = self.d.get_det_gt()

        # perfect detections
        scores = np.ones(det_gt.shape[0])
        self.full_dets = det_gt.append_column('score', scores)

        # perfect detections, but only for class 'A'
        dets_just_A = self.d.get_det_gt_for_class('A')
        scores = np.ones(dets_just_A.shape[0])
        self.partial_dets = dets_just_A.append_column('score', scores)

    def test_values(self):
        det_gt = self.d.get_det_gt()

        self.d.set_class_values('uniform')
        assert(np.all(self.d.values == 1. / 3 * np.ones(len(self.classes))))

        ap = evaluation.compute_det_map(self.full_dets, det_gt, self.d.values)
        assert(ap == 1)
        ap = evaluation.compute_det_map(
            self.partial_dets, det_gt, self.d.values)
        assert_almost_equal(ap, 1 / 3.)

        self.d.set_class_values('inverse_prior')
        assert(np.all(self.d.values == np.array([0.25, 0.25, 0.5])))

        ap = evaluation.compute_det_map(self.full_dets, det_gt, self.d.values)
        assert(ap == 1)
        ap = evaluation.compute_det_map(
            self.partial_dets, det_gt, self.d.values)
        assert_almost_equal(ap, 0.25)

    def test_compute_pr_multiclass(self):
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'img_ind', 'diff']
        dets_cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind', 'img_ind']

        # two objects of different classes in the image, perfect detection
        arr = np.array(
            [[0, 0, 10, 10, 0, 0, 0],
             [10, 10, 10, 10, 1, 0, 0]])
        gt = Table(arr, cols)

        dets_arr = np.array(
            [[0, 0, 10, 10, -1, -1, 0, 0],
             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        # make sure gt and gt_cols aren't modified
        gt_arr_copy = gt.arr.copy()
        gt_cols_copy = list(gt.cols)
        ap, rec, prec = evaluation.compute_det_pr(dets, gt)
        assert(np.all(gt.arr == gt_arr_copy))
        assert(gt_cols_copy == gt.cols)

        correct_ap = 1
        correct_rec = np.array([0.5, 1])
        correct_prec = np.array([1, 1])
        print((ap, rec, prec))
        assert(correct_ap == ap)
        assert(np.all(correct_rec == rec))
        assert(np.all(correct_prec == prec))

        # some extra detections to generate false positives
        dets_arr = np.array(
            [[0, 0, 10, 10, -1, -1, 0, 0],
             [0, 0, 10, 10, 0, -1, 0, 0],
             [10, 10, 10, 10, 0, -1, 1, 0],
             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        ap, rec, prec = evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert(np.all(correct_rec == rec))
        assert(np.all(correct_prec == prec))

        # confirm that running on the same dets gives the same answer
        ap, rec, prec = evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert(np.all(correct_rec == rec))
        assert(np.all(correct_prec == prec))

        # now let's add two objects of a different class to gt to lower recall
        arr = np.array(
            [[0, 0, 10, 10, 0, 0, 0],
             [10, 10, 10, 10, 1, 0, 0],
             [20, 20, 10, 10, 2, 0, 0],
             [30, 30, 10, 10, 2, 0, 0]])
        gt = Table(arr, cols)
        ap, rec, prec = evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.25, 0.5, 0.5, 0.5])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert(np.all(correct_rec == rec))
        assert(np.all(correct_prec == prec))

        # now call it with empty detections
        dets_arr = np.array([])
        dets = Table(dets_arr, dets_cols)
        ap, rec, prec = evaluation.compute_det_pr(dets, gt)
        correct_ap = 0
        correct_rec = np.array([0])
        correct_prec = np.array([0])
        print((ap, rec, prec))
        assert(np.all(correct_ap == ap))
        assert(np.all(correct_rec == rec))
        assert(np.all(correct_prec == prec))
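The expected recall and precision vectors follow the usual ranked-detection bookkeeping: walk the detections in rank order, flag each as a true or false positive, and take cumulative precision and recall at every rank. A small sketch reproducing the case above with four detections (the first two being true positives) against four ground-truth objects, independent of the evaluation module:

import numpy as np

tp = np.array([1., 1., 0., 0.])    # detections 3 and 4 are false positives
n_gt = 4                           # gt now contains four objects
ranks = np.arange(1, len(tp) + 1)
rec = np.cumsum(tp) / n_gt         # [0.25, 0.5, 0.5, 0.5]
prec = np.cumsum(tp) / ranks       # [1.0, 1.0, 0.667, 0.5]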

    def test_plots(self):
        full_results_dirname = os.path.join(res_dir, 'full_dets_eval')
        partial_results_dirname = os.path.join(res_dir, 'partial_dets_eval')

        evaluation.evaluate_detections_whole(
            self.d, self.partial_dets, partial_results_dirname, force=True)
        assert(os.path.exists(
            os.path.join(partial_results_dirname, 'whole_dashboard.html')))
        pngs = glob.glob(os.path.join(partial_results_dirname, '*.png'))
        assert(len(pngs) == 4)  # 3 classes + 1 multiclass

        evaluation.evaluate_detections_whole(
            self.d, self.full_dets, full_results_dirname, force=True)
def main():
    parser = argparse.ArgumentParser(
        description="Run experiments with the timely detection system.")

    parser.add_argument('--test_dataset',
                        choices=['val', 'test', 'trainval'],
                        default='val',
                        help="""Dataset to use for testing. Run on val until final runs.
        The training dataset is inferred (val->train; test->trainval; trainval->trainval).""")

    parser.add_argument('--first_n', type=int,
                        help='only take the first N images in the test dataset')

    parser.add_argument('--first_n_train', type=int,
                        help='only take the first N images in the train dataset')

    parser.add_argument('--config',
                        help="""Config file name that specifies the experiments to run.
        Give name such that the file is configs/#{name}.json or configs/#{name}/
        In the latter case, all files within the directory will be loaded.""")

    parser.add_argument('--suffix',
                        help="Overwrites the suffix in the config(s).")

    parser.add_argument('--bounds10', action='store_true',
                        default=False, help='set bounds to [0,10]')

    parser.add_argument('--bounds515', action='store_true',
                        default=False, help='set bounds to [5,15]')

    parser.add_argument('--force', action='store_true',
                        default=False, help='force overwrite')

    parser.add_argument('--wholeset_prs', action='store_true',
                        default=False, help='evaluate in the final p-r regime')

    parser.add_argument('--no_apvst', action='store_true',
                        default=False, help='do NOT evaluate in the ap vs. time regime')

    parser.add_argument('--det_configs', action='store_true',
                        default=False, help='output detector statistics to det_configs')

    parser.add_argument('--inverse_prior', action='store_true',
                        default=False, help='use inverse prior class values')

    args = parser.parse_args()
    print(args)

    # If config file is not given, just run one experiment using default config
    if not args.config:
        configs = [DatasetPolicy.default_config]
    else:
        configs = load_configs(args.config)

    # Load the dataset
    dataset = Dataset('full_pascal_' + args.test_dataset)
    if args.first_n:
        dataset.images = dataset.images[:args.first_n]

    # Infer train_dataset
    if args.test_dataset == 'test':
        train_dataset = Dataset('full_pascal_trainval')
    elif args.test_dataset == 'val':
        train_dataset = Dataset('full_pascal_train')
    elif args.test_dataset == 'trainval':
        train_dataset = Dataset('full_pascal_trainval')
    else:
        pass  # impossible by argparse settings

    # Only need to set training dataset values; evaluation gets it from there
    if args.inverse_prior:
        train_dataset.set_values('inverse_prior')

    # TODO: hack
    if args.first_n_train:
        train_dataset.images = train_dataset.images[:args.first_n_train]

    # In both the above cases, we use the val dataset for weights
    weights_dataset_name = 'full_pascal_val'

    dets_tables = []
    dets_tables_whole = []
    clses_tables_whole = []
    all_bounds = []

    plot_infos = []
    for config_f in configs:
        if args.suffix:
            config_f['suffix'] = args.suffix
        if args.bounds10:
            config_f['bounds'] = [0, 10]
        if args.bounds515:
            config_f['bounds'] = [5, 15]
        assert(not (args.bounds10 and args.bounds515))
        if args.inverse_prior:
            config_f['suffix'] += '_inverse_prior'
            config_f['values'] = 'inverse_prior'

        dp = DatasetPolicy(
            dataset, train_dataset, weights_dataset_name, **config_f)
        ev = Evaluation(config, dp)
        all_bounds.append(dp.bounds)
        plot_infos.append(dict((k, config_f[k]) for k in (
            'label', 'line', 'color') if k in config_f))
        # output the det configs first
        if args.det_configs:
            dp.output_det_statistics()

        # evaluate in the AP vs. Time regime, unless told not to
        if not args.no_apvst:
            dets_table = ev.evaluate_vs_t(None, None, force=args.force)
            # dets_table_whole,clses_table_whole =
            # ev.evaluate_vs_t_whole(None,None,force=args.force)
            if mpi.mpi.comm_rank == 0:
                dets_tables.append(dets_table)
                # dets_tables_whole.append(dets_table_whole)
                # clses_tables_whole.append(clses_table_whole)

        # optionally, evaluate in the standard PR regime
        if args.wholeset_prs:
            ev.evaluate_detections_whole(None, force=args.force)

    # and plot the comparison if multiple config files were given
    if not args.no_apvst and len(configs) > 1 and mpi.mpi.comm_rank == 0:
        # filename of the final plot is the config file name
        dirname = config.get_evals_dir(dataset.get_name())
        filename = args.config
        if args.inverse_prior:
            filename += '_inverse_prior'

        # det avg
        ff = os.path.join(dirname, '%s_det_avg.png' % filename)
        ff_nl = os.path.join(dirname, '%s_det_avg_nl.png' % filename)

        # make sure directory exists
        skutil.makedirs(os.path.dirname(ff))

        Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True,
                                force=True, plot_infos=plot_infos)
        Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds,
                                with_legend=False, force=True, plot_infos=plot_infos)

        if False:
            # det whole
            ff = os.path.join(dirname, '%s_det_whole.png' % filename)
            ff_nl = os.path.join(dirname, '%s_det_whole_nl.png' % filename)
            Evaluation.plot_ap_vs_t(dets_tables_whole, ff, all_bounds,
                                    with_legend=True, force=True, plot_infos=plot_infos)
            Evaluation.plot_ap_vs_t(dets_tables_whole, ff_nl, all_bounds,
                                    with_legend=False, force=True, plot_infos=plot_infos)

            # cls whole
            ff = os.path.join(dirname, '%s_cls_whole.png' % filename)
            ff_nl = os.path.join(dirname, '%s_cls_whole_nl.png' % filename)
            Evaluation.plot_ap_vs_t(clses_tables_whole, ff, all_bounds,
                                    with_legend=True, force=True, plot_infos=plot_infos)
            Evaluation.plot_ap_vs_t(clses_tables_whole, ff_nl, all_bounds,
                                    with_legend=False, force=True, plot_infos=plot_infos)
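As a quick sanity check of how the flags above combine, the parser logic can be exercised with an explicit argument list instead of the real command line. This standalone copy of a few of the options is only a sketch and is not part of the original script:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--test_dataset', choices=['val', 'test', 'trainval'], default='val')
parser.add_argument('--first_n', type=int)
parser.add_argument('--bounds10', action='store_true', default=False)
parser.add_argument('--force', action='store_true', default=False)

args = parser.parse_args(['--test_dataset', 'val', '--first_n', '20', '--bounds10', '--force'])
print(args)  # Namespace(bounds10=True, first_n=20, force=True, test_dataset='val')
# With these flags, main() above loads full_pascal_val truncated to 20 images,
# infers full_pascal_train for training, sets bounds to [0, 10], and forces re-evaluation.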
Example 22
def setUp(self):
    self.train_dataset = Dataset(
        test_config, 'test_pascal_train').load_from_pascal('train')
    self.dataset = Dataset(
        test_config, 'test_pascal_val').load_from_pascal('val')
Example 23
def main():
    parser = argparse.ArgumentParser(
        description="Run experiments with the timely detection system.")

    parser.add_argument(
        '--test_dataset',
        choices=['val', 'test', 'trainval'],
        default='val',
        help="""Dataset to use for testing. Run on val until final runs.
        The training dataset is inferred (val->train; test->trainval; trainval->trainval)."""
    )

    parser.add_argument(
        '--first_n',
        type=int,
        help='only take the first N images in the test dataset')

    parser.add_argument(
        '--first_n_train',
        type=int,
        help='only take the first N images in the train dataset')

    parser.add_argument(
        '--config',
        help="""Config file name that specifies the experiments to run.
        Give name such that the file is configs/#{name}.json or configs/#{name}/
        In the latter case, all files within the directory will be loaded.""")

    parser.add_argument('--suffix',
                        help="Overwrites the suffix in the config(s).")

    parser.add_argument('--bounds10',
                        action='store_true',
                        default=False,
                        help='set bounds to [0,10]')

    parser.add_argument('--bounds515',
                        action='store_true',
                        default=False,
                        help='set bounds to [5,15]')

    parser.add_argument('--force',
                        action='store_true',
                        default=False,
                        help='force overwrite')

    parser.add_argument('--wholeset_prs',
                        action='store_true',
                        default=False,
                        help='evaluate in the final p-r regime')

    parser.add_argument('--no_apvst',
                        action='store_true',
                        default=False,
                        help='do NOT evaluate in the ap vs. time regime')

    parser.add_argument('--det_configs',
                        action='store_true',
                        default=False,
                        help='output detector statistics to det_configs')

    parser.add_argument('--inverse_prior',
                        action='store_true',
                        default=False,
                        help='use inverse prior class values')

    args = parser.parse_args()
    print(args)

    # If config file is not given, just run one experiment using default config
    if not args.config:
        configs = [DatasetPolicy.default_config]
    else:
        configs = load_configs(args.config)

    # Load the dataset
    dataset = Dataset('full_pascal_' + args.test_dataset)
    if args.first_n:
        dataset.images = dataset.images[:args.first_n]

    # Infer train_dataset
    if args.test_dataset == 'test':
        train_dataset = Dataset('full_pascal_trainval')
    elif args.test_dataset == 'val':
        train_dataset = Dataset('full_pascal_train')
    elif args.test_dataset == 'trainval':
        train_dataset = Dataset('full_pascal_trainval')
    else:
        pass  # impossible by argparse settings

    # Only need to set training dataset values; evaluation gets it from there
    if args.inverse_prior:
        train_dataset.set_values('inverse_prior')

    # TODO: hack
    if args.first_n_train:
        train_dataset.images = train_dataset.images[:args.first_n_train]

    # In both the above cases, we use the val dataset for weights
    weights_dataset_name = 'full_pascal_val'

    dets_tables = []
    dets_tables_whole = []
    clses_tables_whole = []
    all_bounds = []

    plot_infos = []
    for config_f in configs:
        if args.suffix:
            config_f['suffix'] = args.suffix
        if args.bounds10:
            config_f['bounds'] = [0, 10]
        if args.bounds515:
            config_f['bounds'] = [5, 15]
        assert (not (args.bounds10 and args.bounds515))
        if args.inverse_prior:
            config_f['suffix'] += '_inverse_prior'
            config_f['values'] = 'inverse_prior'

        dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name,
                           **config_f)
        ev = Evaluation(config, dp)
        all_bounds.append(dp.bounds)
        plot_infos.append(
            dict((k, config_f[k]) for k in ('label', 'line', 'color')
                 if k in config_f))
        # output the det configs first
        if args.det_configs:
            dp.output_det_statistics()

        # evaluate in the AP vs. Time regime, unless told not to
        if not args.no_apvst:
            dets_table = ev.evaluate_vs_t(None, None, force=args.force)
            # dets_table_whole,clses_table_whole =
            # ev.evaluate_vs_t_whole(None,None,force=args.force)
            if mpi.mpi.comm_rank == 0:
                dets_tables.append(dets_table)
                # dets_tables_whole.append(dets_table_whole)
                # clses_tables_whole.append(clses_table_whole)

        # optionally, evaluate in the standard PR regime
        if args.wholeset_prs:
            ev.evaluate_detections_whole(None, force=args.force)

    # and plot the comparison if multiple config files were given
    if not args.no_apvst and len(configs) > 1 and mpi.mpi.comm_rank == 0:
        # filename of the final plot is the config file name
        dirname = config.get_evals_dir(dataset.get_name())
        filename = args.config
        if args.inverse_prior:
            filename += '_inverse_prior'

        # det avg
        ff = os.path.join(dirname, '%s_det_avg.png' % filename)
        ff_nl = os.path.join(dirname, '%s_det_avg_nl.png' % filename)

        # make sure directory exists
        skutil.makedirs(os.path.dirname(ff))

        Evaluation.plot_ap_vs_t(dets_tables,
                                ff,
                                all_bounds,
                                with_legend=True,
                                force=True,
                                plot_infos=plot_infos)
        Evaluation.plot_ap_vs_t(dets_tables,
                                ff_nl,
                                all_bounds,
                                with_legend=False,
                                force=True,
                                plot_infos=plot_infos)

        if False:
            # det whole
            ff = os.path.join(dirname, '%s_det_whole.png' % filename)
            ff_nl = os.path.join(dirname, '%s_det_whole_nl.png' % filename)
            Evaluation.plot_ap_vs_t(dets_tables_whole,
                                    ff,
                                    all_bounds,
                                    with_legend=True,
                                    force=True,
                                    plot_infos=plot_infos)
            Evaluation.plot_ap_vs_t(dets_tables_whole,
                                    ff_nl,
                                    all_bounds,
                                    with_legend=False,
                                    force=True,
                                    plot_infos=plot_infos)

            # cls whole
            ff = os.path.join(dirname, '%s_cls_whole.png' % filename)
            ff_nl = os.path.join(dirname, '%s_cls_whole_nl.png' % filename)
            Evaluation.plot_ap_vs_t(clses_tables_whole,
                                    ff,
                                    all_bounds,
                                    with_legend=True,
                                    force=True,
                                    plot_infos=plot_infos)
            Evaluation.plot_ap_vs_t(clses_tables_whole,
                                    ff_nl,
                                    all_bounds,
                                    with_legend=False,
                                    force=True,
                                    plot_infos=plot_infos)
def setUp(self):
    d = Dataset(test_config, 'test_pascal_trainval').load_from_pascal('trainval', force=True)
    d2 = Dataset(test_config, 'test_pascal_test').load_from_pascal('test', force=True)
    config = {'detectors': ['csc_default']}
    self.dp = DatasetPolicy(test_config, d, d2, **config)
    self.bs = BeliefState(d, self.dp.actions)