Example #1
 def __init__(self, dataset, L, numfolds=4):
     self.d = Dataset(dataset)
     self.e = Extractor()
     self.dense_codebook = self.e.get_codebook(self.d, 'dsift')
     self.sparse_codebook = self.e.get_codebook(self.d, 'sift')
     self.L = L
     self.numfolds = numfolds
Example #2
 def test_learn_weights(self):
   dataset = Dataset('full_pascal_val')
   train_dataset = Dataset('full_pascal_train')
   dataset.images = dataset.images[:20]
   train_dataset.images = train_dataset.images[:20]
   dp = DatasetPolicy(dataset,train_dataset,self.weights_dataset_name,**self.config)
   weights = dp.learn_weights()
Example #3
File: dataset.py Project: raldam/timely
 def test_ground_truth_test(self):
     d = Dataset('test_pascal_val')
     gt = d.get_det_gt(with_diff=False, with_trun=False)
     correct = np.matrix([[139., 200., 69., 102., 18., 0., 0., 0.],
                          [123., 155., 93., 41., 17., 0., 0., 1.],
                          [239., 156., 69., 50., 8., 0., 0., 1.]])
     print(gt)
     assert np.all(gt.arr == correct)
Example #4
 def __init__(self):
     self.d = Dataset('full_pascal_trainval')
     self.d_val = Dataset('full_pascal_test')
     self.cls = 'dog'
     suffix = 'default'
     self.csc = CSCClassifier(suffix, self.cls, self.d, self.d_val)
     csc_test = np.load(config.get_ext_dets_filename(self.d, 'csc_default'))
     self.dets = csc_test[()]
Example #5
File: evaluation.py Project: raldam/timely
 def __init__(self):
   self.csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_trainval'), 'r'))
   self.csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_test'), 'r'))
   self.ext_csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_test'), 'r'))
   self.ext_csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_trainval'), 'r'))
   self.d_train = Dataset('full_pascal_trainval')
   self.trainval_gt = self.d_train.get_cls_ground_truth()
   self.d_test = Dataset('full_pascal_test')
   self.test_gt = self.d_test.get_cls_ground_truth()
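The four cPickle.load(open(...)) calls above never close their file handles. A minimal Python 2 refactor sketch, reusing the config helper the snippet already calls (load_support is a hypothetical name, not part of the project):

import os
import cPickle
import synthetic.config as config  # same import used elsewhere in these examples

def load_support(name):
    # Hypothetical helper: unpickle one file from the ext test support dir.
    path = os.path.join(config.get_ext_test_support_dir(), name)
    with open(path, 'rb') as f:  # the context manager closes the handle
        return cPickle.load(f)

# Usage, mirroring the snippet above:
#   self.csc_trainval = load_support('csc_trainval')
#   self.csc_test = load_support('csc_test')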
Example #6
def compute_error_vs_iterations(suffix, num_images, dataset):
    # assemble truth

    d = Dataset(dataset)
    truth = d.get_cls_ground_truth().arr
    truth = np.random.permutation(truth)[:num_images, :]
    num_classes = truth.shape[1]
    tt = ut.TicToc()

    lbp_times = [0] + [10**x for x in range(3)]
    lbp_times += [1000 + 1000 * x for x in range(10)]
    lbp_times += [10**x for x in [5]]
    #lbp_times = [3000]

    all_scores = np.zeros((num_classes, len(lbp_times), num_classes))
    all_times = np.zeros((num_classes, len(lbp_times)))

    counter = 0
    # do inference
    for itdex in range(len(lbp_times)):
        fm = FastinfModel(d, 'perfect', num_classes, lbp_time=lbp_times[itdex])
        for rowdex in range(comm_rank, truth.shape[0], comm_size):  # parallel
            obs = truth[rowdex, :].astype(int)
            taken = np.zeros(num_classes).astype(int)

            for num_obser in range(num_classes):
                counter += 1
                taken[np.argmax(fm.p_c - taken)] = 1

                tt.tic()
                fm.update_with_observations(taken, obs)
                utime = tt.toc(quiet=True)
                curr_score = compute_sq_error(obs, fm.p_c)
                all_scores[num_obser, itdex, :] += curr_score
                all_times[num_obser, itdex] += utime
                print '%d is at %d / %d :' % (comm_rank, counter,
                    len(lbp_times) * num_classes * num_images / float(comm_size)), curr_score

    all_scores /= num_images
    all_times /= num_images

    safebarrier(comm)
    all_scores = comm.reduce(all_scores)
    all_times = comm.reduce(all_times)
    if comm_rank == 0:  #parallel
        outfile = open('all_scores_' + suffix, 'w')
        cPickle.dump(all_scores, outfile)
        outfile.close()
        outfile = open('all_times_' + suffix, 'w')
        cPickle.dump(all_times, outfile)
        outfile.close()
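Here comm, comm_rank, and comm_size come from common_mpi: each rank strides over a disjoint subset of rows, and comm.reduce sums the per-rank arrays onto rank 0. A minimal self-contained sketch of that pattern, assuming common_mpi wraps mpi4py (which the names suggest, though its source is not shown):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
comm_rank, comm_size = comm.Get_rank(), comm.Get_size()

partial = np.zeros(4)
for i in range(comm_rank, 100, comm_size):  # disjoint strided slice per rank
    partial[i % 4] += 1

total = comm.reduce(partial)  # element-wise sum across ranks; None off root
if comm_rank == 0:
    print total  # -> [ 25.  25.  25.  25.]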
Example #7
class ClassifierConfig():
  def __init__(self, dataset, L, numfolds=4):
    self.d = Dataset(dataset)
    self.e = Extractor()
    self.dense_codebook = self.e.get_codebook(self.d, 'dsift')
    self.sparse_codebook = self.e.get_codebook(self.d, 'sift')
    self.L = L
    self.numfolds = numfolds
    
  def kfold(self):
    train_idx, val_idx = KFold(len(self.d.images), self.numfolds)
    self.d.create_folds(self.numfolds)
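Note that kfold unpacks a single (train_idx, val_idx) pair, which only works if KFold returns one split; scikit-learn's KFold is iterated instead. A minimal sketch of the equivalent split with the current scikit-learn API (an assumption, since the snippet's own KFold import is not shown):

import numpy as np
from sklearn.model_selection import KFold

images = np.arange(20)  # stand-in for self.d.images
for train_idx, val_idx in KFold(n_splits=4).split(images):
    print len(train_idx), len(val_idx)  # 15 5, four times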
Example #8
 def __init__(self):
     self.dataset = Dataset('test_pascal_val')
     self.train_dataset = Dataset('test_pascal_train')
     self.weights_dataset_name = 'test_pascal_val'
     self.config = {
         'suffix': 'default',
         'detectors': ['perfect'],  # perfect, perfect_with_noise, dpm, csc_default, csc_half
         'policy_mode': 'random',
         'bounds': None,
         'weights_mode': 'manual_1'  # manual_1, manual_2, manual_3, greedy, rl
     }
     self.dp = DatasetPolicy(self.dataset, self.train_dataset,
                             self.weights_dataset_name, **self.config)
Example #9
def run():
  dataset = Dataset('full_pascal_test')
  train_dataset = Dataset('full_pascal_trainval')
  cls = 'dog'
  rtype = '1big_2small'
  args = 0.5
  detector = 'csc_default'
  from synthetic.dataset_policy import DatasetPolicy
  all_dets = DatasetPolicy.load_ext_detections(dataset, detector)
  cls_ind = dataset.get_ind(cls)
  dets = all_dets.filter_on_column('cls_ind',cls_ind,omit=True)  
  ext_det = ExternalDetectorRegions(dataset, train_dataset, cls, dets, detector, rtype, args)
  img = dataset.images[13]  # Just some random image...where did the get_image_by_name go?
  print img.size
  print ext_det.detect(img, 0)
  print ext_det.detect(img, 1)
  print ext_det.detect(img, 2)
Example #10
File: evaluation.py Project: raldam/timely
class TestEvaluationSynthetic:
    def __init__(self):
        self.d = Dataset('test_data2', force=True)
        self.classes = ["A", "B", "C"]
        self.det_gt = self.d.get_det_gt()

    def test(self):
        scores = np.ones(self.det_gt.shape[0])
        dets = self.det_gt.append_column('score', scores)

        scores = np.ones(self.d.get_det_gt_for_class('A').shape[0])
        dets_just_A = self.d.get_det_gt_for_class('A')
        dets_just_A = dets_just_A.append_column('score', scores)

        self.d.set_values('uniform')
        assert (np.all(self.d.values == 1. / 3 * np.ones(len(self.classes))))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.33333333333333))

        self.d.set_values('inverse_prior')
        assert (np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.25))
Example #11
File: dataset.py Project: raldam/timely
 def test_kfold(self):
     """
     'sizes' here are empirical values over the trainval set.
     """
     d = Dataset('full_pascal_trainval')
     numfolds = 4
     d.create_folds(numfolds)
     cls = 'dog'
     sizes = [314, 308, 321, 320]
     for i in range(len(d.folds)):
         d.next_folds()
         pos = d.get_pos_samples_for_fold_class(cls)
         neg = d.get_neg_samples_for_fold_class(cls, pos.shape[0])
         assert (pos.shape[0] == sizes[i])
         assert (neg.shape[0] == sizes[i])
Example #12
def test():
    dataset = Dataset('full_pascal_trainval')
    fm = FastinfModel(dataset, 'perfect', 20)
    # NOTE: just took values from a run of the thing

    prior_correct = [
        float(x) for x in
        "0.050543  0.053053  0.073697  0.038331  0.050954  0.041879  0.16149\
    0.068721  0.10296   0.026837  0.043779  0.087683  0.063447  0.052205\
    0.41049   0.051664  0.014211  0.068361  0.056969  0.05046".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, prior_correct, 4)

    observations = np.zeros(20)
    taken = np.zeros(20)
    fm.update_with_observations(taken, observations)
    np.testing.assert_almost_equal(fm.p_c, prior_correct, 4)
    observations[5] = 1
    taken[5] = 1
    fm.update_with_observations(taken, observations)
    print fm.p_c
    correct = [
        float(x) for x in
        "0.027355   0.11855    0.027593   0.026851   0.012569   0.98999    0.52232\
    0.017783   0.010806   0.015199   0.0044641  0.02389    0.033602   0.089089\
    0.50297    0.0083272  0.0088274  0.0098522  0.034259   0.0086298".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, correct, 4)
    observations[15] = 0
    taken[15] = 1
    fm.update_with_observations(taken, observations)
    correct = [
        float(x) for x in
        "2.73590000e-02   1.19030000e-01   2.75500000e-02   2.68760000e-02 \
   1.23920000e-02   9.90200000e-01   5.25320000e-01   1.76120000e-02 \
   1.05030000e-02   1.52130000e-02   4.26410000e-03   2.38250000e-02 \
   3.36870000e-02   8.96450000e-02   5.04300000e-01   8.71880000e-05 \
   8.82630000e-03   9.55290000e-03   3.43240000e-02   8.44510000e-03".split()
    ]
    np.testing.assert_almost_equal(fm.p_c, correct)

    # reinit_marginals
    fm.reset()
    np.testing.assert_equal(fm.p_c, prior_correct)

    print(fm.cache)
Example #13
'''
Created on Nov 21, 2011

@author: Tobias Baumgartner
'''
from common_imports import *
from common_mpi import *
import synthetic.config as config

from synthetic.dataset import Dataset

from synthetic.classifier import Classifier

if __name__=='__main__':
  
  train_set = 'full_pascal_train'
  train_dataset = Dataset(train_set)  
  images = train_dataset.images
  classes = config.pascal_classes
  suffix = 'default'
  filename = config.get_ext_dets_filename(train_dataset, 'csc_'+suffix)
  csc_train = np.load(filename)
  csc_train = csc_train[()]  
  csc_train = csc_train.subset(['score', 'cls_ind', 'img_ind'])
  score = csc_train.subset(['score']).arr
  classif = Classifier()
  csc_train.arr = classif.normalize_dpm_scores(csc_train.arr)

  numpos = train_dataset.get_ground_truth().shape[0]
  
  threshs = np.arange(0,1.01,0.05)
  
Example #14
 def setup(self):
   self.d = Dataset('test_pascal_train',force=True)
Example #15
File: dataset.py Project: raldam/timely
 def test_get_pos_windows(self):
     d = Dataset('test_pascal_val')
Example #16
"""
Runner script to output cooccurrence statistics for the synthetic
and PASCAL datasets.
"""

from synthetic.common_imports import *
from synthetic.dataset import Dataset

datasets = [
    'synthetic', 'full_pascal_train', 'full_pascal_trainval',
    'full_pascal_val', 'full_pascal_test'
]

for dataset in datasets:
    d = Dataset(dataset)
    f = d.plot_coocurrence()
    f = d.plot_coocurrence(second_order=True)
    f = d.plot_distribution()
Example #17
File: detector.py Project: raldam/timely
 def setup(self):
   self.dataset = Dataset('test_pascal_val')
   self.train_dataset = Dataset('test_pascal_train')
Example #18
File: dataset.py Project: raldam/timely
class TestDatasetJson:
  def setup(self):
    self.d = Dataset('test_data1',force=True)
    self.classes = ["A","B","C"]

  def test_load(self):
    assert(self.d.num_images() == 4)
    assert(self.d.classes == self.classes)

  def test_get_det_gt(self):
    gt = self.d.get_det_gt(with_diff=True,with_trun=False)
    df = Table(
      np.array([[ 0.,  0.,  0.,  0.,  0.,  0, 0, 0.],
       [ 1.,  1.,  1.,  1.,  1.,  0, 0, 0.],
       [ 1.,  1.,  1.,  0.,  0.,  0, 0, 1.],
       [ 0.,  0.,  0.,  0.,  1.,  0, 0, 2.],
       [ 0.,  0.,  0.,  0.,  2.,  0, 0, 3.],
       [ 1.,  1.,  1.,  1.,  2.,  0, 0, 3.]]),
       ['x','y','w','h','cls_ind','diff','trun','img_ind'])
    print(gt)
    print(df)
    assert(gt == df)

  def test_get_cls_counts_json(self):
    arr = np.array(
      [ [ 1, 1, 0],
        [ 1, 0, 0],
        [ 0, 1, 0],
        [ 0, 0, 2]])
    print(self.d.get_cls_counts())
    assert(np.all(self.d.get_cls_counts() == arr))

  def test_get_cls_ground_truth_json(self):
    table = Table(
      np.array([ [ True, True, False],
        [ True, False, False],
        [ False, True, False],
        [ False, False, True] ]), ["A","B","C"])
    assert(self.d.get_cls_ground_truth()==table)

  def test_det_ground_truth_for_class_json(self):
    gt = self.d.get_det_gt_for_class("A",with_diff=True,with_trun=True)
    arr = np.array(
      [[ 0.,  0.,  0.,  0.,  0., 0., 0, 0.],
       [ 1.,  1.,  1.,  0.,  0., 0., 0., 1.]])
    cols = ['x','y','w','h','cls_ind','diff','trun','img_ind']
    print(gt.arr)
    assert(np.all(gt.arr == arr))
    assert(gt.cols == cols)

    # no diff or trun
    gt = self.d.get_det_gt_for_class("A",with_diff=False,with_trun=False)
    arr = np.array(
      [[ 0.,  0.,  0.,  0.,  0., 0., 0, 0.],
       [ 1.,  1.,  1.,  0.,  0., 0., 0., 1.]])
    cols = ['x','y','w','h','cls_ind','diff','trun','img_ind']
    print(gt.arr)
    assert(np.all(gt.arr == arr))
    assert(gt.cols == cols)

  def test_set_values(self):
    assert(np.all(self.d.values == 1/3. * np.ones(len(self.classes))))
    self.d.set_values('uniform')
    assert(np.all(self.d.values == 1/3. * np.ones(len(self.classes))))
    self.d.set_values('inverse_prior')
    print(self.d.values)
    assert(np.all(self.d.values == np.array([0.25,0.25,0.5])))
Example #19
class TestDatasetPolicy:
  def __init__(self):
    self.dataset = Dataset('test_pascal_val')
    self.train_dataset = Dataset('test_pascal_train')
    self.weights_dataset_name = 'test_pascal_val'
    self.config = {
      'suffix': 'default',
      'detectors': ['perfect'], # perfect,perfect_with_noise,dpm,csc_default,csc_half
      'policy_mode': 'random',
      'bounds': None,
      'weights_mode': 'manual_1' # manual_1, manual_2, manual_3, greedy, rl
    }
    self.dp = DatasetPolicy(
      self.dataset,self.train_dataset,self.weights_dataset_name,**self.config)

  def test_run_on_dataset(self):
    # run on test dataset
    dets,clses,samples = self.dp.run_on_dataset(force=True) 
    assert(len(samples) == clses.shape[0])
    assert(len(samples) == self.dp.dataset.num_images()*len(self.dp.actions))
    train_dets,train_clses,train_samples = self.dp.run_on_dataset(train=True,force=True)
    assert(len(train_samples) == train_clses.shape[0])
    assert(len(train_samples) == self.dp.train_dataset.num_images()*len(self.dp.actions))

  def test_unique_samples(self):
    "Test the correctness of making a list of samples unique."
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    new_sample = copy.deepcopy(samples[11])
    new_sample2 = copy.deepcopy(samples[11])
    new_sample2.dt = -40 # an unreasonable value
    assert(new_sample in samples)
    assert(new_sample2 not in samples)

  def test_output_det_statistics(self):
    self.dp.output_det_statistics()

  def test_learn_weights(self):
    dataset = Dataset('full_pascal_val')
    train_dataset = Dataset('full_pascal_train')
    dataset.images = dataset.images[:20]
    train_dataset.images = train_dataset.images[:20]
    dp = DatasetPolicy(dataset,train_dataset,self.weights_dataset_name,**self.config)
    weights = dp.learn_weights()

  def test_regress(self):
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)
    samples += samples
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)
    samples += samples
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)

  def test_load_weights(self):
    modes = ['manual_1','manual_2','manual_3']
    for mode in modes:
      print "%s weights:"%mode
      self.dp.weights_mode=mode
      self.dp.load_weights()
      print self.dp.get_reshaped_weights()
      assert(self.dp.weights.shape[0] == len(self.dp.actions)*BeliefState.num_features)
      self.dp.write_weights_image('temp_weights_%s.png'%mode)

  def test_perfect_detector(self):
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    #embed()
    dets = dets.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])
    gt = self.dataset.get_det_gt()
    gt = gt.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])

    # TODO: does this make sense?
    dets.sort_by_column('x')
    gt.sort_by_column('x')
    print dets
    print gt
    assert(dets == gt)

  def test_load_dpm_detections(self):
    conf = dict(self.config)
    conf['detectors'] = ['dpm']
    policy = DatasetPolicy(self.dataset,self.train_dataset,**conf)
    assert(policy.detectors == ['dpm'])
    dets = policy.load_ext_detections(self.dataset,'dpm_may25',force=True)
    dets = dets.with_column_omitted('time')

    # load the ground truth dets, processed in Matlab
    # (timely/data/test_support/concat_dets.m)
    filename = os.path.join(config.test_support_dir, 'val_dets.mat')
    dets_correct = Table(
        scipy.io.loadmat(filename)['dets'],
        ['x1','y1','x2','y2','dummy','dummy','dummy','dummy','score','cls_ind','img_ind'],
        'dets_correct')
    dets_correct = dets_correct.subset(
        ['x1','y1','x2','y2','score','cls_ind','img_ind'])
    dets_correct.arr[:,:4] -= 1
    dets_correct.arr[:,:4] = BoundingBox.convert_arr_from_corners(
        dets_correct.arr[:,:4])
    dets_correct.cols = ['x','y','w','h','score','cls_ind','img_ind']
    
    print('----mine:')
    print(dets)
    print('----correct:')
    print(dets_correct)
    assert(dets_correct == dets)
Example #20
File: evaluation.py Project: raldam/timely
class TestEvaluationPerfect:
    def __init__(self):
        self.csc_trainval = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'csc_trainval'), 'r'))
        self.csc_test = cPickle.load(
            open(os.path.join(config.get_ext_test_support_dir(), 'csc_test'),
                 'r'))
        self.ext_csc_test = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'ext_csc_test'), 'r'))
        self.ext_csc_trainval = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'ext_csc_trainval'), 'r'))
        self.d_train = Dataset('full_pascal_trainval')
        self.trainval_gt = self.d_train.get_cls_ground_truth()
        self.d_test = Dataset('full_pascal_test')
        self.test_gt = self.d_test.get_cls_ground_truth()

    def setup(self):
        train_dataset = Dataset('test_pascal_train', force=True)
        dataset = Dataset('test_pascal_val', force=True)
        self.dp = DatasetPolicy(dataset, train_dataset, detector='perfect')
        self.evaluation = Evaluation(self.dp)

    def test_compute_pr_multiclass(self):
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'img_ind', 'diff']
        dets_cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind', 'img_ind']

        # two objects of different classes in the image, perfect detection
        arr = np.array([[0, 0, 10, 10, 0, 0, 0], [10, 10, 10, 10, 1, 0, 0]])
        gt = Table(arr, cols)

        dets_arr = np.array([[0, 0, 10, 10, -1, -1, 0, 0],
                             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        # make sure gt and gt_cols aren't modified
        gt_arr_copy = gt.arr.copy()
        gt_cols_copy = list(gt.cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        assert (np.all(gt.arr == gt_arr_copy))
        assert (gt_cols_copy == gt.cols)

        correct_ap = 1
        correct_rec = np.array([0.5, 1])
        correct_prec = np.array([1, 1])
        print((ap, rec, prec))
        assert (correct_ap == ap)
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # some extra detections to generate false positives
        dets_arr = np.array([[0, 0, 10, 10, -1, -1, 0, 0],
                             [0, 0, 10, 10, 0, -1, 0, 0],
                             [10, 10, 10, 10, 0, -1, 1, 0],
                             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # confirm that running on the same dets gives the same answer
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # now let's add two objects of a different class to gt to lower recall
        arr = np.array([[0, 0, 10, 10, 0, 0, 0], [10, 10, 10, 10, 1, 0, 0],
                        [20, 20, 10, 10, 2, 0, 0], [30, 30, 10, 10, 2, 0, 0]])
        gt = Table(arr, cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.25, 0.5, 0.5, 0.5])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # now call it with empty detections
        dets_arr = np.array([])
        dets = Table(dets_arr, dets_cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_ap = 0
        correct_rec = np.array([0])
        correct_prec = np.array([0])
        print((ap, rec, prec))
        assert (np.all(correct_ap == ap))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

    def test_compute_cls_map(self):
        res = Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt)
        assert (round(res, 11) == 0.47206391958)

    def test_compute_cls_map_half(self):
        table_csc_half = Table()
        table_csc_half.cols = list(self.csc_trainval.cols)
        for _ in range(10):
            rand_inds = np.random.permutation(range(5011))[:2500]
            table_csc_half.arr = self.csc_trainval.arr[rand_inds, :]
            res = Evaluation.compute_cls_map(table_csc_half, self.trainval_gt)
            assert (round(res, 6) > .45)

    def test_compute_cls_map_gt(self):
        res = Evaluation.compute_cls_map(self.trainval_gt, self.trainval_gt)
        assert (round(res, 6) == 1)

    def test_compute_cls_map_gt_half(self):
        rand_inds = np.random.permutation(range(5011))[:2500]
        table_gt_half = Table()
        table_gt_half.arr = np.hstack(
            (self.trainval_gt.arr, np.array(np.arange(5011), ndmin=2).T))
        table_gt_half.arr = table_gt_half.arr[rand_inds, :]
        table_gt_half.cols = list(self.trainval_gt.cols) + ['img_ind']
        res = Evaluation.compute_cls_map(table_gt_half, self.trainval_gt)
        assert (round(res, 6) == 1)

    def test_compute_cls_map_random_clf(self):
        clf_table = Table()
        num_test = 10
        ress = np.zeros((num_test, ))
        for idx in range(num_test):
            clf_table.arr = np.hstack(
                (np.random.rand(5011, 20), np.array(np.arange(5011),
                                                    ndmin=2).T))
            clf_table.cols = list(self.trainval_gt.cols) + ['img_ind']
            res = Evaluation.compute_cls_map(clf_table, self.trainval_gt)
            ress[idx] = res
        assert (np.mean(ress) < 0.09)

    def test_other_scores(self):
        print 'csc_test', Evaluation.compute_cls_map(self.csc_test,
                                                     self.test_gt)
        print 'csc_trainval', Evaluation.compute_cls_map(
            self.csc_trainval, self.trainval_gt)

        print 'ext_test', Evaluation.compute_cls_map(self.ext_csc_test,
                                                     self.test_gt)
        print 'ext_trainval', Evaluation.compute_cls_map(
            self.ext_csc_trainval, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_chi2_20')
        ext_table_chi2_20 = cPickle.load(open(filename, 'r'))
        print 'ext_chi2_20_test', Evaluation.compute_cls_map(
            ext_table_chi2_20, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_chi2_20')
        ext_table_chi2_20_tv = cPickle.load(open(filename, 'r'))
        print 'ext_chi2_20_trainval', Evaluation.compute_cls_map(
            ext_table_chi2_20_tv, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_rbf_20')
        ext_table_rbf_20 = cPickle.load(open(filename, 'r'))
        print 'ext_rbf_20_test', Evaluation.compute_cls_map(
            ext_table_rbf_20, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_rbf_20')
        ext_table_rbf_20_tv = cPickle.load(open(filename, 'r'))
        print 'ext_rbf_20_trainval', Evaluation.compute_cls_map(
            ext_table_rbf_20_tv, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_linear_20')
        ext_linear_20_test = cPickle.load(open(filename, 'r'))
        print 'ext_linear_test', Evaluation.compute_cls_map(
            ext_linear_20_test, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_linear_20')
        ext_table_linear_20 = cPickle.load(open(filename, 'r'))
        print 'ext_linear_20_trainval', Evaluation.compute_cls_map(
            ext_table_linear_20, self.trainval_gt)

        filename = 'tab_linear_5'
        ext_tab_lin_5 = cPickle.load(open(filename, 'r'))
        print 'ext_tab_lin_5_trainval', Evaluation.compute_cls_map(
            ext_tab_lin_5, self.trainval_gt)
Example #21
File: evaluation.py Project: raldam/timely
 def __init__(self):
     self.d = Dataset('test_data2', force=True)
     self.classes = ["A", "B", "C"]
     self.det_gt = self.d.get_det_gt()
Example #22
    codebook = e.get_codebook(d, 'sift')
    ut.makedirs(join(config.data_dir, 'jumping_window','lookup'))
    train_jumping_windows(d, codebook, use_scale=use_scale,trun=True,diff=False, feature=feature)

  
  debug = True
  just_eval = True

  if just_eval:
    basedir = join(config.data_dir, 'jumping_window')
    foldname_det = join(basedir, 'detections')    
    foldname_lookup = join(basedir, 'lookup')
    ut.makedirs(foldname_det)
    
    print 'start testing on node', comm_rank
    dtest = Dataset('full_pascal_test')
    #for cls_idx, cls in enumerate(all_classes):
    for cls_idx, cls in enumerate([all_classes[0]]):
      #cls=all_classes
      gt_t = dtest.get_ground_truth_for_class(cls, with_diff=False,
          with_trun=True)
      e = Extractor()
      codebook = e.get_codebook(dtest, 'sift')
            
      filename_lookup = join(foldname_lookup,cls)
      store_file = open(filename_lookup, 'r')
      bbinfo = cPickle.load(store_file)
            
      test_gt = gt_t.arr
      npos = test_gt.shape[0]
      test_imgs = test_gt[:,gt_t.cols.index('img_ind')]
Example #23
    print table 
    cPickle.dump(table, open('table','w'))
    print 'saved'
  return table

def conv(d_train, table_arr):
  table = Table()
  #table_arr = cPickle.load(open('table_linear_5','r'))
  table.arr = np.hstack((table_arr, np.array(np.arange(table_arr.shape[0]),ndmin=2).T))
  table.cols = d_train.classes + ['img_ind']
  print table
  #cPickle.dump(table, open('tab_linear_5','w'))
  return table
  
if __name__=='__main__':
  d_train = Dataset('full_pascal_trainval')
  d_val = Dataset('full_pascal_val')

  train_gt = d_train.get_cls_ground_truth()
  val_gt = d_val.get_cls_ground_truth()

  if comm_rank == 0:
    filename = os.path.join(config.get_classifier_dataset_dirname(CSCClassifier('default','dog', d_train, d_val), d_train),'crossval.txt')
  
  kernels =  ['linear']
  Cs = [50]
  
  settings = list(itertools.product(kernels, Cs))
  
  for setin in settings:
    kernel = setin[0]
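conv() appends an img_ind column so each row of the score matrix can be traced back to its image. A minimal self-contained demonstration of that np.hstack idiom (shapes made up for illustration):

import numpy as np

scores = np.random.rand(5, 3)  # 5 images x 3 classes
img_ind = np.array(np.arange(scores.shape[0]), ndmin=2).T  # column vector [[0], [1], ...]
arr = np.hstack((scores, img_ind))
print arr.shape  # (5, 4); last column is the image index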
Example #24
#  testY = load_from_mat('testY.mat', 'testY')
#  print testY
   
#  model = train_svm(x, y)         
#  print 'result:'  
#  result = svm_predict(x0, model)
#  print result
  

#  d = Dataset('full_pascal_trainval')
#  d.evaluate_get_pos_windows(0.5)

#if False:
  randomize = not os.path.exists('/home/tobibaum')
  
  d = Dataset('full_pascal_train')
  dtest = Dataset('full_pascal_val')
  
  e = Extractor()
  
  classes = config.pascal_classes  
  num_words = 3000
  iters = 10
  feature_type = 'dsift'
  codebook_samples = 15
  num_pos = 'max'
  testsize = 'max' 
  kernel = 'chi2'
  
#  num_pos = 3
#  testsize = 4
Example #25
def main():
    parser = argparse.ArgumentParser(
        description='Execute different functions of our system')
    parser.add_argument('--first_n',
                        type=int,
                        help='only take the first N images in the datasets')
    parser.add_argument('--name',
                        help='name for this run',
                        default='default',
                        choices=['default', 'nolateral', 'nohal', 'halfsize'])
    parser.add_argument('--force',
                        action='store_true',
                        default=False,
                        help='force overwrite')

    args = parser.parse_args()
    print(args)

    #configuration class
    class config(object):
        pass

    cfg = config()
    cfg.testname = "../ctfdet/data/finalRL/%s2_test"  #object model
    cfg.bottomup = False  #use complete search
    cfg.resize = 1.0  #resize the input image
    cfg.hallucinate = True  #use HOGs up to 4 pixels
    cfg.initr = 1  #initial radious of the CtF search
    cfg.ratio = 1  #radious at the next levels
    cfg.deform = True  #use deformation
    cfg.usemrf = True  #use lateral constraints

    if args.name == 'default':
        pass  # sticking with the default params
    elif args.name == 'nolateral':
        cfg.usemrf = False
    elif args.name == 'nohal':
        cfg.hallucinate = False
    elif args.name == 'halfsize':
        cfg.resize = 0.5

    # f**k it, do both
    test_datasets = ['val', 'test', 'train']
    for test_dataset in test_datasets:
        # Load the dataset
        dataset = Dataset('full_pascal_' + test_dataset)
        if args.first_n:
            dataset.images = dataset.images[:args.first_n]

        # create directory for storing cached detections
        dirname = './temp_data'
        if os.path.exists('/u/sergeyk'):
            dirname = '/u/vis/x1/sergeyk/object_detection'
        dirname = dirname + '/ctfdets/%s' % (args.name)
        ut.makedirs(dirname)

        num_images = len(dataset.images)
        for img_ind in range(comm_rank, num_images, comm_size):
            # check for existing det
            image = dataset.images[img_ind]
            filename = os.path.join(dirname, image.name + '.npy')
            if os.path.exists(filename) and not args.force:
                #table = np.load(filename)[()]
                continue

            #read the image
            imname = dataset.get_image_filename(img_ind)
            img = util2.myimread(imname, resize=cfg.resize)
            #compute the hog pyramid
            f = pyrHOG2.pyrHOG(img,
                               interv=10,
                               savedir="",
                               notsave=True,
                               notload=True,
                               hallucinate=cfg.hallucinate,
                               cformat=True)

            #for each class
            all_dets = []
            for ccls in dataset.classes:
                t = time.time()
                cls_ind = dataset.get_ind(ccls)
                print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1,
                                                  num_images, ccls)
                #load the class model
                m = util2.load("%s%d.model" % (cfg.testname % ccls, 7))
                res = []
                t1 = time.time()
                #for each aspect
                for clm, m in enumerate(m):
                    #scan the image with left and right models
                    res.append(
                        pyrHOG2RL.detectflip(f,
                                             m,
                                             None,
                                             hallucinate=cfg.hallucinate,
                                             initr=cfg.initr,
                                             ratio=cfg.ratio,
                                             deform=cfg.deform,
                                             bottomup=cfg.bottomup,
                                             usemrf=cfg.usemrf,
                                             small=False,
                                             cl=clm))
                fuse = []
                numhog = 0
                #fuse the detections
                for mix in res:
                    tr = mix[0]
                    fuse += mix[1]
                    numhog += mix[3]
                rfuse = tr.rank(fuse, maxnum=300)
                nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False)
                #print "Number of computed HOGs:",numhog
                time_elapsed = time.time() - t
                print "Elapsed time: %.3f s" % time_elapsed

                bboxes = [nf['bbox'] for nf in nfuse]
                scores = [nf['scr'] for nf in nfuse]
                assert (len(bboxes) == len(scores))
                if len(bboxes) > 0:
                    arr = np.zeros((len(bboxes), 7))
                    arr[:, :4] = BoundingBox.convert_arr_from_corners(
                        np.array(bboxes))
                    arr[:, 4] = scores
                    arr[:, 5] = time_elapsed
                    arr[:, 6] = cls_ind
                    all_dets.append(arr)
            cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind']
            if len(all_dets) > 0:
                all_dets = np.concatenate(all_dets, 0)
            else:
                all_dets = np.array([])
            table = Table(all_dets, cols)
            np.save(filename, table)
Example #26
File: dataset.py Project: raldam/timely
class TestDatasetPascal:
    def setup(self):
        self.d = Dataset('test_pascal_train', force=True)

    def test_ground_truth_pascal_train(self):
        assert (self.d.num_classes() == 20)
        assert ('dog' in self.d.classes)

    def test_ground_truth_for_class_pascal(self):
        correct = np.array([[48., 240., 148., 132., 11., 0., 1., 0.]])
        ans = self.d.get_det_gt_for_class("dog")
        print ans
        assert np.all(ans.arr == correct)

    def test_neg_samples(self):
        # unlimited negative examples
        indices = self.d.get_neg_samples_for_class("dog",
                                                   with_diff=True,
                                                   with_trun=True)
        correct = np.array([1, 2])
        assert (np.all(indices == correct))

        # maximum 1 negative example
        indices = self.d.get_neg_samples_for_class("dog",
                                                   1,
                                                   with_diff=True,
                                                   with_trun=True)
        correct1 = np.array([1])
        correct2 = np.array([2])
        print(indices)
        assert (np.all(indices == correct1) or np.all(indices == correct2))

    def test_pos_samples(self):
        indices = self.d.get_pos_samples_for_class("dog")
        correct = np.array([0])
        assert (np.all(indices == correct))

    def test_ground_truth_test(self):
        d = Dataset('test_pascal_val')
        gt = d.get_det_gt(with_diff=False, with_trun=False)
        correct = np.matrix([[139., 200., 69., 102., 18., 0., 0., 0.],
                             [123., 155., 93., 41., 17., 0., 0., 1.],
                             [239., 156., 69., 50., 8., 0., 0., 1.]])
        print(gt)
        assert np.all(gt.arr == correct)

    def test_get_pos_windows(self):
        d = Dataset('test_pascal_val')
        # TODO

    def test_kfold(self):
        """
        'sizes' here are empirical values over the trainval set.
        """
        d = Dataset('full_pascal_trainval')
        numfolds = 4
        d.create_folds(numfolds)
        cls = 'dog'
        sizes = [314, 308, 321, 320]
        for i in range(len(d.folds)):
            d.next_folds()
            pos = d.get_pos_samples_for_fold_class(cls)
            neg = d.get_neg_samples_for_fold_class(cls, pos.shape[0])
            assert (pos.shape[0] == sizes[i])
            assert (neg.shape[0] == sizes[i])
Example #27
      return np.zeros((1,intervals+1))
    dpm = feats.subset(['score', 'cls_ind', 'img_ind'])
    img_dpm = dpm.filter_on_column('img_ind', img, omit=True)
    if img_dpm.arr.size == 0:
      print 'empty vector'
      return np.zeros((1,intervals+1))
    cls_dpm = img_dpm.filter_on_column('cls_ind', cls, omit=True)
    hist = self.compute_histogram(cls_dpm.arr, intervals, lower, upper)
    vector = np.zeros((1, intervals+1))
    vector[0,0:-1] = hist
    vector[0,-1] = img_dpm.shape[0]
    return vector
  
if __name__=='__main__':
  train_set = 'full_pascal_train'
  train_dataset = Dataset(train_set)  
  dpm_dir = os.path.join(config.res_dir, 'dpm_dets')
  filename = os.path.join(dpm_dir, train_set + '_dets_all_may25_DP.npy')
  dpm_train = np.load(filename)
  dpm_train = dpm_train[()]  
  dpm_train = dpm_train.subset(['score', 'cls_ind', 'img_ind'])
  dpm_classif = DPMClassifier()
  dpm_train.arr = dpm_classif.normalize_dpm_scores(dpm_train.arr)
  
  val_set = 'full_pascal_val'
  test_dataset = Dataset(val_set)  
  dpm_test_dir = os.path.join(config.res_dir, 'dpm_dets')
  filename = os.path.join(dpm_dir, val_set + '_dets_all_may25_DP.npy')
  dpm_test = np.load(filename)
  dpm_test = dpm_test[()]  
  dpm_test = dpm_test.subset(['score', 'cls_ind', 'img_ind'])
Example #28
def main():
    parser = argparse.ArgumentParser(description="Run experiments with the timely detection system.")

    parser.add_argument(
        "--test_dataset",
        choices=["val", "test", "trainval"],
        default="val",
        help="""Dataset to use for testing. Run on val until final runs.
    The training dataset is inferred (val->train; test->trainval; trainval->trainval).""",
    )

    parser.add_argument("--first_n", type=int, help="only take the first N images in the test dataset")

    parser.add_argument("--first_n_train", type=int, help="only take the first N images in the train dataset")

    parser.add_argument(
        "--config",
        help="""Config file name that specifies the experiments to run.
    Give name such that the file is configs/#{name}.json or configs/#{name}/
    In the latter case, all files within the directory will be loaded.""",
    )

    parser.add_argument("--suffix", help="Overwrites the suffix in the config(s).")

    parser.add_argument("--bounds10", action="store_true", default=False, help="set bounds to [0,10]")

    parser.add_argument("--bounds515", action="store_true", default=False, help="set bounds to [5,15]")

    parser.add_argument("--force", action="store_true", default=False, help="force overwrite")

    parser.add_argument("--wholeset_prs", action="store_true", default=False, help="evaluate in the final p-r regime")

    parser.add_argument(
        "--no_apvst", action="store_true", default=False, help="do NOT evaluate in the ap vs. time regime"
    )

    parser.add_argument(
        "--det_configs", action="store_true", default=False, help="output detector statistics to det_configs"
    )

    parser.add_argument("--inverse_prior", action="store_true", default=False, help="use inverse prior class values")

    args = parser.parse_args()
    print(args)

    # If config file is not given, just run one experiment using default config
    if not args.config:
        configs = [DatasetPolicy.default_config]
    else:
        configs = load_configs(args.config)

    # Load the dataset
    dataset = Dataset("full_pascal_" + args.test_dataset)
    if args.first_n:
        dataset.images = dataset.images[: args.first_n]

    # Infer train_dataset
    if args.test_dataset == "test":
        train_dataset = Dataset("full_pascal_trainval")
    elif args.test_dataset == "val":
        train_dataset = Dataset("full_pascal_train")
    elif args.test_dataset == "trainval":
        train_dataset = Dataset("full_pascal_trainval")
    else:
        pass  # impossible by argparse settings

    # Only need to set training dataset values; evaluation gets it from there
    if args.inverse_prior:
        train_dataset.set_values("inverse_prior")

    # TODO: hack
    if args.first_n_train:
        train_dataset.images = train_dataset.images[: args.first_n_train]

    # In both the above cases, we use the val dataset for weights
    weights_dataset_name = "full_pascal_val"

    dets_tables = []
    dets_tables_whole = []
    clses_tables_whole = []
    all_bounds = []

    plot_infos = []
    for config_f in configs:
        if args.suffix:
            config_f["suffix"] = args.suffix
        if args.bounds10:
            config_f["bounds"] = [0, 10]
        if args.bounds515:
            config_f["bounds"] = [5, 15]
        assert not (args.bounds10 and args.bounds515)
        if args.inverse_prior:
            config_f["suffix"] += "_inverse_prior"
            config_f["values"] = "inverse_prior"

        dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f)
        ev = Evaluation(dp)
        all_bounds.append(dp.bounds)
        plot_infos.append(dict((k, config_f[k]) for k in ("label", "line", "color") if k in config_f))
        # output the det configs first
        if args.det_configs:
            dp.output_det_statistics()

        # evaluate in the AP vs. Time regime, unless told not to
        if not args.no_apvst:
            dets_table = ev.evaluate_vs_t(None, None, force=args.force)
            # dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force)
            if comm_rank == 0:
                dets_tables.append(dets_table)
                # dets_tables_whole.append(dets_table_whole)
                # clses_tables_whole.append(clses_table_whole)

        # optionally, evaluate in the standard PR regime
        if args.wholeset_prs:
            ev.evaluate_detections_whole(None, force=args.force)

    # and plot the comparison if multiple config files were given
    if not args.no_apvst and len(configs) > 1 and comm_rank == 0:
        # filename of the final plot is the config file name
        dirname = config.get_evals_dir(dataset.get_name())
        filename = args.config
        if args.inverse_prior:
            filename += "_inverse_prior"

        # det avg
        ff = opjoin(dirname, "%s_det_avg.png" % filename)
        ff_nl = opjoin(dirname, "%s_det_avg_nl.png" % filename)

        # make sure directory exists
        ut.makedirs(os.path.dirname(ff))

        Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
        Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

        if False:
            # det whole
            ff = opjoin(dirname, "%s_det_whole.png" % filename)
            ff_nl = opjoin(dirname, "%s_det_whole_nl.png" % filename)
            Evaluation.plot_ap_vs_t(
                dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos
            )
            Evaluation.plot_ap_vs_t(
                dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos
            )

            # cls whole
            ff = opjoin(dirname, "%s_cls_whole.png" % filename)
            ff_nl = opjoin(dirname, "%s_cls_whole_nl.png" % filename)
            Evaluation.plot_ap_vs_t(
                clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos
            )
            Evaluation.plot_ap_vs_t(
                clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos
            )
Example #29
File: classifier.py Project: raldam/timely
 def __init__(self):
     self.clf = Classifier()
     self.d = Dataset('full_pascal_trainval')
예제 #41
0
def main():
  parser = argparse.ArgumentParser(
    description="Run experiments with the timely detection system.")

  parser.add_argument('--test_dataset',
    choices=['val','test','trainval'],
    default='val',
    help="""Dataset to use for testing. Run on val until final runs.
    The training dataset is inferred (val->train; test->trainval; trainval->trainval).""")

  parser.add_argument('--first_n', type=int,
    help='only take the first N images in the test dataset')

  parser.add_argument('--first_n_train', type=int,
    help='only take the first N images in the train dataset')

  parser.add_argument('--config',
    help="""Config file name that specifies the experiments to run.
    Give name such that the file is configs/#{name}.json or configs/#{name}/
    In the latter case, all files within the directory will be loaded.""")

  parser.add_argument('--suffix',
    help="Overwrites the suffix in the config(s).")

  parser.add_argument('--bounds10', action='store_true', 
    default=False, help='set bounds to [0,10]')
  
  parser.add_argument('--bounds515', action='store_true', 
    default=False, help='set bounds to [5,15]')

  parser.add_argument('--force', action='store_true', 
    default=False, help='force overwrite')

  parser.add_argument('--wholeset_prs', action='store_true', 
    default=False, help='evaluate in the final p-r regime')

  parser.add_argument('--no_apvst', action='store_true', 
    default=False, help='do NOT evaluate in the ap vs. time regime')

  parser.add_argument('--det_configs', action='store_true', 
    default=False, help='output detector statistics to det_configs')

  parser.add_argument('--inverse_prior', action='store_true', 
    default=False, help='use inverse prior class values')

  args = parser.parse_args()
  print(args)

  # If config file is not given, just run one experiment using default config
  if not args.config:
    configs = [DatasetPolicy.default_config]
  else:
    configs = load_configs(args.config)

  # Load the dataset
  dataset = Dataset('full_pascal_'+args.test_dataset)
  if args.first_n:
    dataset.images = dataset.images[:args.first_n]

  # Infer train_dataset
  if args.test_dataset=='test':
    train_dataset = Dataset('full_pascal_trainval')
  elif args.test_dataset=='val':
    train_dataset = Dataset('full_pascal_train')
  elif args.test_dataset=='trainval':
    train_dataset = Dataset('full_pascal_trainval')
  else:
    pass  # impossible by argparse settings
  
  # Only need to set training dataset values; evaluation gets it from there
  if args.inverse_prior:
    train_dataset.set_values('inverse_prior')

  # TODO: hack
  if args.first_n_train:
    train_dataset.images = train_dataset.images[:args.first_n_train]

  # In all of the above cases, we use the val dataset for weights
  weights_dataset_name = 'full_pascal_val'

  dets_tables = []
  dets_tables_whole = []
  clses_tables_whole = []
  all_bounds = []
      
  plot_infos = [] 
  for config_f in configs:
    if args.suffix:
      config_f['suffix'] = args.suffix
    if args.bounds10:
      config_f['bounds'] = [0,10]
    if args.bounds515:
      config_f['bounds'] = [5,15]
    assert(not (args.bounds10 and args.bounds515))
    if args.inverse_prior:
      config_f['suffix'] += '_inverse_prior'
      config_f['values'] = 'inverse_prior'

    dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f)
    ev = Evaluation(dp)
    all_bounds.append(dp.bounds)
    plot_infos.append(dict((k,config_f[k]) for k in ('label','line','color') if k in config_f))
    # output the det configs first
    if args.det_configs:
      dp.output_det_statistics()

    # evaluate in the AP vs. Time regime, unless told not to
    if not args.no_apvst:
      dets_table = ev.evaluate_vs_t(None,None,force=args.force)
      #dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force)
      if comm_rank==0:
        dets_tables.append(dets_table)
        #dets_tables_whole.append(dets_table_whole)
        #clses_tables_whole.append(clses_table_whole)

    # optionally, evaluate in the standard PR regime
    if args.wholeset_prs:
      ev.evaluate_detections_whole(None,force=args.force)

  # and plot the comparison if multiple config files were given
  if not args.no_apvst and len(configs)>1 and comm_rank==0:
    # filename of the final plot is the config file name
    dirname = config.get_evals_dir(dataset.get_name())
    filename = args.config
    if args.inverse_prior:
      filename += '_inverse_prior'
    
    # det avg
    ff = opjoin(dirname, '%s_det_avg.png'%filename)
    ff_nl = opjoin(dirname, '%s_det_avg_nl.png'%filename)

    # make sure directory exists
    ut.makedirs(os.path.dirname(ff))
    
    Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
    Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

    if False:
      # det whole
      ff = opjoin(dirname, '%s_det_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_det_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

      # cls whole
      ff = opjoin(dirname, '%s_cls_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_cls_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)
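
The `--config` help text above says a name resolves to either `configs/#{name}.json` or a `configs/#{name}/` directory whose files are all loaded. `load_configs` itself is never shown in these examples, so here is a minimal sketch of a helper following only that stated convention; the function name, the `configs` directory default, and the JSON parsing are assumptions, not the project's actual implementation:

import json
import os

def load_configs_sketch(name, configs_dir='configs'):
    path = os.path.join(configs_dir, name)
    if os.path.isdir(path):
        # Directory form: configs/<name>/ -> load every file inside it.
        files = [os.path.join(path, f) for f in sorted(os.listdir(path))]
    else:
        # Single-file form: configs/<name>.json
        files = [path + '.json']
    configs = []
    for f in files:
        with open(f) as fd:
            configs.append(json.load(fd))
    return configs
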
Example #42
from common_imports import *
from common_mpi import *
import synthetic.config as config

from synthetic.dataset import Dataset
from synthetic.extractor import Extractor

if __name__ == '__main__':
    d = Dataset('full_pascal_trainval')
    feature_type = 'dsift'
    numpos = 15
    num_words = 3000
    iterations = 8
    e = Extractor()
    all_classes = config.pascal_classes
    #  for cls_idx in range(comm_rank, len(all_classes), comm_size): # PARALLEL
    #  #for cls in all_classes:
    #    cls = all_classes[cls_idx]
    #    print cls
    #d, feature_type, num_words=3000,iterations=10, force_new=False, kmeansBatch=True
    # pass the remaining params by keyword to match the signature noted
    # above (numpos appears unused here)
    e.get_codebook(d,
                   feature_type,
                   num_words=num_words,
                   iterations=iterations,
                   force_new=False,
                   kmeansBatch=True)
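
The commented-out loop above shows the round-robin pattern `range(comm_rank, len(items), comm_size)` used throughout these examples to split work across MPI processes (`comm_rank` and `comm_size` come from `common_mpi`). A self-contained sketch of the idiom, assuming `common_mpi` wraps mpi4py and with a stand-in class list:

from mpi4py import MPI

comm = MPI.COMM_WORLD
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()

pascal_classes = ['aeroplane', 'bicycle', 'bird']  # stand-in for config.pascal_classes

# Each process takes every comm_size-th item starting at its own rank,
# partitioning the list with no communication at all.
for cls_idx in range(comm_rank, len(pascal_classes), comm_size):
    print('rank %d handles %s' % (comm_rank, pascal_classes[cls_idx]))
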
Example #43
                dets_seq.append(cls_dets)
        cols = [
            'x', 'y', 'w', 'h', 'dummy', 'dummy', 'dummy', 'dummy', 'score',
            'time', 'cls_ind'
        ]
        # NMS detections per class individually
        dets_mc = ut.collect(dets_seq, Detector.nms_detections, {'cols': cols})
        dets_mc[:, :4] = BoundingBox.clipboxes_arr(
            dets_mc[:, :4], (0, 0, image.size[0] - 1, image.size[1] - 1))
        time_elapsed = time.time() - t
        print("On image %s, took %.3f s" % (image.name, time_elapsed))
        return dets_mc


if __name__ == '__main__':
    train_d = Dataset('full_pascal_trainval')

    just_combine = False

    for ds in ['full_pascal_trainval']:  # 'full_pascal_test'
        eval_d = Dataset(ds)
        dp = DatasetPolicy(eval_d, train_d, detectors=['csc_default'])
        test_table = np.zeros((len(eval_d.images), len(dp.actions)))

        if not just_combine:
            for img_idx in range(comm_rank, len(eval_d.images), comm_size):
                img = eval_d.images[img_idx]
                for act_idx, act in enumerate(dp.actions):
                    print('%s on %d for act %d' % (img.name, comm_rank,
                                                   act_idx))
                    score = act.obj.get_observations(img)['score']
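
`Detector.nms_detections` is applied per class above but never shown. For orientation, a minimal sketch of greedy non-maximum suppression over `[x, y, w, h, score]` rows; the IoU overlap measure and the 0.5 threshold are assumptions, and the project's own NMS may differ in such details:

import numpy as np

def nms_sketch(dets, overlap_thresh=0.5):
    # dets: (N, 5) array of [x, y, w, h, score] rows.
    if dets.shape[0] == 0:
        return dets
    x1, y1 = dets[:, 0], dets[:, 1]
    x2, y2 = x1 + dets[:, 2], y1 + dets[:, 3]
    areas = (dets[:, 2] * dets[:, 3]).astype(float)
    order = dets[:, 4].argsort()[::-1]  # indices by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Overlap of the current best box with all remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0., xx2 - xx1) * np.maximum(0., yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes that overlap the kept box by at most the threshold.
        order = order[1:][iou <= overlap_thresh]
    return dets[keep]
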
Example #44
class TestDatasetPascal:
  def setup(self):
    self.d = Dataset('test_pascal_train',force=True)
    
  def test_ground_truth_pascal_train(self):
    assert(self.d.num_classes() == 20)
    assert('dog' in self.d.classes)

  def test_ground_truth_for_class_pascal(self):
    correct = np.array(
      [[  48.,  240.,  148.,  132.,    11.,  0., 1., 0.]])
    ans = self.d.get_det_gt_for_class("dog")
    print(ans)
    assert np.all(ans.arr == correct)
      
  def test_neg_samples(self):
    # unlimited negative examples
    indices = self.d.get_neg_samples_for_class("dog",with_diff=True,with_trun=True)
    correct = np.array([1,2])
    assert(np.all(indices == correct))

    # maximum 1 negative example
    indices = self.d.get_neg_samples_for_class("dog",1,with_diff=True,with_trun=True)
    correct1 = np.array([1])
    correct2 = np.array([2])
    print(indices)
    assert(np.all(indices == correct1) or np.all(indices == correct2))

  def test_pos_samples(self):
    indices = self.d.get_pos_samples_for_class("dog")
    correct = np.array([0])
    assert(np.all(indices == correct))
    
  def test_ground_truth_test(self):
    d = Dataset('test_pascal_val')
    gt = d.get_det_gt(with_diff=False,with_trun=False)
    correct = np.matrix(
        [ [ 139.,  200.,   69.,  102.,   18.,   0., 0., 0.],
          [ 123.,  155.,   93.,   41.,   17.,   0., 0., 1.],
          [ 239.,  156.,   69.,   50.,    8.,   0., 0., 1.]])
    print(gt)
    assert np.all(gt.arr == correct)

  def test_get_pos_windows(self):
    d = Dataset('test_pascal_val')
    # TODO
    
  def test_kfold(self):
    """
    'sizes' here are empirical values over the trainval set.
    """
    d = Dataset('full_pascal_trainval')
    numfolds = 4
    d.create_folds(numfolds)
    cls = 'dog'
    sizes = [314, 308, 321, 320]
    for i in range(len(d.folds)):
      d.next_folds()
      pos = d.get_pos_samples_for_fold_class(cls)
      neg = d.get_neg_samples_for_fold_class(cls, pos.shape[0])
      assert(pos.shape[0] == sizes[i])
      assert(neg.shape[0] == sizes[i])
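
`test_kfold` asserts empirical per-fold counts of positive 'dog' images over the trainval set. The fold construction itself lives inside `Dataset`; a rough sketch of one way `create_folds` could partition image indices (a hypothetical reconstruction, not the project's code):

import numpy as np

def create_folds_sketch(num_images, numfolds, seed=0):
    # Shuffle image indices once, then deal them out round-robin so the
    # folds are random and of near-equal size.
    perm = np.random.RandomState(seed).permutation(num_images)
    return [perm[i::numfolds] for i in range(numfolds)]

folds = create_folds_sketch(5011, 4)  # 5011 = trainval size used elsewhere
assert sum(len(f) for f in folds) == 5011
assert max(len(f) for f in folds) - min(len(f) for f in folds) <= 1
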
Example #45
File: dataset.py Project: raldam/timely
 def setup(self):
     self.d = Dataset('test_data1', force=True)
     self.classes = ["A", "B", "C"]
Example #46
 def __init__(self):
   self.d = Dataset('test_data2',force=True)
   self.classes = ["A","B","C"]
   self.det_gt = self.d.get_det_gt()
Example #47
File: dataset.py Project: raldam/timely
 def setup(self):
     self.d = Dataset('test_pascal_train', force=True)
Example #48
class TestEvaluationPerfect:
  def __init__(self):
    self.csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_trainval'), 'r'))
    self.csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_test'), 'r'))
    self.ext_csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_test'), 'r'))
    self.ext_csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_trainval'), 'r'))
    self.d_train = Dataset('full_pascal_trainval')
    self.trainval_gt = self.d_train.get_cls_ground_truth()
    self.d_test = Dataset('full_pascal_test')
    self.test_gt = self.d_test.get_cls_ground_truth()
  
  def setup(self):
    train_dataset = Dataset('test_pascal_train',force=True)
    dataset = Dataset('test_pascal_val',force=True)
    self.dp = DatasetPolicy(dataset,train_dataset,detector='perfect')
    self.evaluation = Evaluation(self.dp)    

  def test_compute_pr_multiclass(self):
    cols = ['x','y','w','h','cls_ind','img_ind','diff'] 
    dets_cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind', 'img_ind']
    
    # two objects of different classes in the image, perfect detection
    arr = np.array(
        [ [0,0,10,10,0,0,0],
          [10,10,10,10,1,0,0] ])
    gt = Table(arr,cols)

    dets_arr = np.array(
        [ [0,0,10,10,-1,-1,0,0],
          [10,10,10,10,-1,-1,1,0] ]) 
    dets = Table(dets_arr,dets_cols)
    
    # make sure gt and gt_cols aren't modified
    gt_arr_copy = gt.arr.copy()
    gt_cols_copy = list(gt.cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    assert(np.all(gt.arr == gt_arr_copy))
    assert(gt_cols_copy == gt.cols)

    correct_ap = 1
    correct_rec = np.array([0.5,1])
    correct_prec = np.array([1,1])
    print((ap, rec, prec))
    assert(correct_ap == ap)
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # some extra detections to generate false positives
    dets_arr = np.array(
        [ [0,0,10,10,-1,-1,0,0],
          [0,0,10,10,0,-1,0,0],
          [10,10,10,10,0,-1,1,0],
          [10,10,10,10,-1,-1,1,0] ]) 
    dets = Table(dets_arr,dets_cols)

    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.5,1,1,1])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # confirm that running on the same dets gives the same answer
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.5,1,1,1])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # now let's add two objects of a different class to gt to lower recall
    arr = np.array(
        [ [0,0,10,10,0,0,0],
          [10,10,10,10,1,0,0],
          [20,20,10,10,2,0,0],
          [30,30,10,10,2,0,0] ])
    gt = Table(arr,cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.25,0.5,0.5,0.5])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # now call it with empty detections
    dets_arr = np.array([])
    dets = Table(dets_arr,dets_cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_ap = 0
    correct_rec = np.array([0])
    correct_prec = np.array([0])
    print((ap, rec, prec))
    assert(np.all(correct_ap==ap))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))
  
  def test_compute_cls_map(self):
    res = Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt)
    assert(round(res,11) == 0.47206391958)
    
  def test_compute_cls_map_half(self):
    table_csc_half = Table()
    table_csc_half.cols = list(self.csc_trainval.cols)
    for _ in range(10):
      rand_inds = np.random.permutation(range(5011))[:2500]
      table_csc_half.arr = self.csc_trainval.arr[rand_inds,:]      
      res = Evaluation.compute_cls_map(table_csc_half, self.trainval_gt)
      assert(round(res,6) > .45)
  
  def test_compute_cls_map_gt(self):
    res = Evaluation.compute_cls_map(self.trainval_gt, self.trainval_gt)
    assert(round(res,6) == 1)
    
  def test_compute_cls_map_gt_half(self):
    rand_inds = np.random.permutation(range(5011))[:2500]
    table_gt_half = Table()
    table_gt_half.arr = np.hstack((self.trainval_gt.arr,np.array(np.arange(5011), ndmin=2).T))
    table_gt_half.arr = table_gt_half.arr[rand_inds,:]
    table_gt_half.cols = list(self.trainval_gt.cols) + ['img_ind']
    res = Evaluation.compute_cls_map(table_gt_half, self.trainval_gt)
    assert(round(res,6) == 1)
  
  def test_compute_cls_map_random_clf(self):
    clf_table = Table()
    num_test = 10
    ress = np.zeros((num_test,))
    for idx in range(num_test):
      clf_table.arr = np.hstack((np.random.rand(5011, 20),np.array(np.arange(5011), ndmin=2).T))
      clf_table.cols = list(self.trainval_gt.cols) + ['img_ind']
      res = Evaluation.compute_cls_map(clf_table, self.trainval_gt)
      ress[idx] = res
    assert(np.mean(ress) < 0.09)
  
  def test_other_scores(self):
    print('csc_test %s' % Evaluation.compute_cls_map(self.csc_test, self.test_gt))
    print('csc_trainval %s' % Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt))

    print('ext_test %s' % Evaluation.compute_cls_map(self.ext_csc_test, self.test_gt))
    print('ext_trainval %s' % Evaluation.compute_cls_map(self.ext_csc_trainval, self.trainval_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_chi2_20')
    ext_table_chi2_20 = cPickle.load(open(filename, 'r'))
    print('ext_chi2_20_test %s' % Evaluation.compute_cls_map(ext_table_chi2_20, self.test_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_chi2_20')
    ext_table_chi2_20_tv = cPickle.load(open(filename, 'r'))
    print('ext_chi2_20_trainval %s' % Evaluation.compute_cls_map(ext_table_chi2_20_tv, self.trainval_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_rbf_20')
    ext_table_rbf_20 = cPickle.load(open(filename, 'r'))
    print('ext_rbf_20_test %s' % Evaluation.compute_cls_map(ext_table_rbf_20, self.test_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_rbf_20')
    ext_table_rbf_20_tv = cPickle.load(open(filename, 'r'))
    print('ext_rbf_20_trainval %s' % Evaluation.compute_cls_map(ext_table_rbf_20_tv, self.trainval_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_linear_20')
    ext_linear_20_test = cPickle.load(open(filename, 'r'))
    print('ext_linear_test %s' % Evaluation.compute_cls_map(ext_linear_20_test, self.test_gt))

    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_linear_20')
    ext_table_linear_20 = cPickle.load(open(filename, 'r'))
    print('ext_linear_20_trainval %s' % Evaluation.compute_cls_map(ext_table_linear_20, self.trainval_gt))

    filename = 'tab_linear_5'
    ext_tab_lin_5 = cPickle.load(open(filename, 'r'))
    print('ext_tab_lin_5_trainval %s' % Evaluation.compute_cls_map(ext_tab_lin_5, self.trainval_gt))
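
`Evaluation.compute_cls_map` is exercised heavily above but not shown. The tests pin down its behavior: ground truth scored against itself gives 1, and random scores land near chance, which is consistent with a mean of per-class average precisions over image-level class scores. A hedged sketch under that reading, with sklearn's average_precision_score as a stand-in for the project's AP computation:

import numpy as np
from sklearn.metrics import average_precision_score

def compute_cls_map_sketch(scores, gt):
    # scores, gt: (num_images, num_classes); gt is binary class presence.
    aps = [average_precision_score(gt[:, c], scores[:, c])
           for c in range(gt.shape[1])]
    return float(np.mean(aps))

# Ground truth against itself scores a perfect 1, as in test_compute_cls_map_gt.
gt = (np.random.rand(100, 20) > 0.8).astype(int)
gt[0, :] = 1  # make sure every class has at least one positive
assert abs(compute_cls_map_sketch(gt.astype(float), gt) - 1.0) < 1e-9
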
Example #49
def main():
    parser = argparse.ArgumentParser(description="Execute different functions of our system")
    parser.add_argument("--first_n", type=int, help="only take the first N images in the datasets")
    parser.add_argument(
        "--name", help="name for this run", default="default", choices=["default", "nolateral", "nohal", "halfsize"]
    )
    parser.add_argument("--force", action="store_true", default=False, help="force overwrite")

    args = parser.parse_args()
    print(args)

    # configuration class
    class config(object):
        pass

    cfg = config()
    cfg.testname = "../ctfdet/data/finalRL/%s2_test"  # object model
    cfg.bottomup = False  # use complete search
    cfg.resize = 1.0  # resize the input image
    cfg.hallucinate = True  # use HOGs up to 4 pixels
    cfg.initr = 1  # initial radius of the CtF search
    cfg.ratio = 1  # radius at the next levels
    cfg.deform = True  # use deformation
    cfg.usemrf = True  # use lateral constraints

    if args.name == "default":
        pass  # sticking with the default params
    elif args.name == "nolateral":
        cfg.usemrf = False
    elif args.name == "nohal":
        cfg.hallucinate = False
    elif args.name == "halfsize":
        cfg.resize = 0.5

    # run on all three splits
    test_datasets = ["val", "test", "train"]
    for test_dataset in test_datasets:
        # Load the dataset
        dataset = Dataset("full_pascal_" + test_dataset)
        if args.first_n:
            dataset.images = dataset.images[: args.first_n]

        # create directory for storing cached detections
        dirname = "./temp_data"
        if os.path.exists("/u/sergeyk"):
            dirname = "/u/vis/x1/sergeyk/object_detection"
        dirname = dirname + "/ctfdets/%s" % (args.name)
        ut.makedirs(dirname)

        num_images = len(dataset.images)
        for img_ind in range(comm_rank, num_images, comm_size):
            # check for existing det
            image = dataset.images[img_ind]
            filename = os.path.join(dirname, image.name + ".npy")
            if os.path.exists(filename) and not args.force:
                # table = np.load(filename)[()]
                continue

            # read the image
            imname = dataset.get_image_filename(img_ind)
            img = util2.myimread(imname, resize=cfg.resize)
            # compute the hog pyramid
            f = pyrHOG2.pyrHOG(
                img, interv=10, savedir="", notsave=True, notload=True, hallucinate=cfg.hallucinate, cformat=True
            )

            # for each class
            all_dets = []
            for ccls in dataset.classes:
                t = time.time()
                cls_ind = dataset.get_ind(ccls)
                print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1, num_images, ccls)
                # load the class model
                models = util2.load("%s%d.model" % (cfg.testname % ccls, 7))
                res = []
                t1 = time.time()
                # for each aspect (avoid rebinding the model list in the loop)
                for clm, m in enumerate(models):
                    # scan the image with left and right models
                    res.append(
                        pyrHOG2RL.detectflip(
                            f,
                            m,
                            None,
                            hallucinate=cfg.hallucinate,
                            initr=cfg.initr,
                            ratio=cfg.ratio,
                            deform=cfg.deform,
                            bottomup=cfg.bottomup,
                            usemrf=cfg.usemrf,
                            small=False,
                            cl=clm,
                        )
                    )
                fuse = []
                numhog = 0
                # fuse the detections
                for mix in res:
                    tr = mix[0]
                    fuse += mix[1]
                    numhog += mix[3]
                rfuse = tr.rank(fuse, maxnum=300)
                nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False)
                # print "Number of computed HOGs:",numhog
                time_elapsed = time.time() - t
                print "Elapsed time: %.3f s" % time_elapsed

                bboxes = [nf["bbox"] for nf in nfuse]
                scores = [nf["scr"] for nf in nfuse]
                assert len(bboxes) == len(scores)
                if len(bboxes) > 0:
                    arr = np.zeros((len(bboxes), 7))
                    arr[:, :4] = BoundingBox.convert_arr_from_corners(np.array(bboxes))
                    arr[:, 4] = scores
                    arr[:, 5] = time_elapsed
                    arr[:, 6] = cls_ind
                    all_dets.append(arr)
            cols = ["x", "y", "w", "h", "score", "time", "cls_ind"]
            if len(all_dets) > 0:
                all_dets = np.concatenate(all_dets, 0)
            else:
                all_dets = np.array([])
            table = Table(all_dets, cols)
            np.save(filename, table)
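
`BoundingBox.convert_arr_from_corners` above maps corner-format boxes into the `[x, y, w, h]` layout the detection tables use. A minimal sketch of that conversion, assuming corners mean `[x1, y1, x2, y2]`; whether the project treats corners as inclusive (adding 1 to widths) is not visible here, so this is the plain difference form:

import numpy as np

def corners_to_xywh(arr):
    # arr: (N, 4) of [x1, y1, x2, y2] -> (N, 4) of [x, y, w, h].
    out = arr.astype(float).copy()
    out[:, 2] = arr[:, 2] - arr[:, 0]  # w = x2 - x1
    out[:, 3] = arr[:, 3] - arr[:, 1]  # h = y2 - y1
    return out

print(corners_to_xywh(np.array([[10, 20, 50, 80]])))  # [[10. 20. 40. 60.]]
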
Example #50
 def setup(self):
   self.d = Dataset('test_data1',force=True)
   self.classes = ["A","B","C"]
Example #51
File: evaluation.py Project: raldam/timely
 def setup(self):
     train_dataset = Dataset('test_pascal_train', force=True)
     dataset = Dataset('test_pascal_val', force=True)
     self.dp = DatasetPolicy(dataset, train_dataset, detector='perfect')
     self.evaluation = Evaluation(self.dp)
Example #52
def main():
  parser = argparse.ArgumentParser(description='Execute different functions of our system')
  parser.add_argument('mode',
    choices=[
      'window_stats', 'evaluate_metaparams', 'evaluate_jw',
      'evaluate_get_pos_windows', 'train_svm',
      'extract_sift','extract_assignments','extract_codebook',
      'evaluate_jw_grid', 'final_metaparams',
      'assemble_dpm_dets','ctfdet','assemble_ctf_dets'
      ])
  parser.add_argument('--test_dataset', choices=['val','test','train'],
      default='test', help='dataset to use for testing. the training dataset \
      is automatically inferred (val->train and test->trainval).')
  parser.add_argument('--first_n', type=int,
      help='only take the first N images in the datasets')
  parser.add_argument('--bounds', type=str,
      help='the start_time and deadline_time for the ImagePolicy and corresponding evaluation. ex: (1,5)')
  parser.add_argument('--name', help='name for this run')
  parser.add_argument('--priors', default='random', help= \
      "list of choices for the policy for selecting the next action. choose from random, oracle, fixed_order, no_smooth, backoff. ex: --priors=random,oracle,no_smooth")
  parser.add_argument('--compare_evals', action='store_true', 
      default=False, help='plot all the prior modes given on the same plot')
  parser.add_argument('--detector', choices=['perfect','perfect_with_noise', 'dpm','ctf'],
      default='perfect', help='detector type')
  parser.add_argument('--force', action='store_true', 
      default=False, help='force overwrite')
  parser.add_argument('--gist', action='store_true', 
      default=False, help='use GIST as one of the actions')
  parser.add_argument('--clear_tmp', action='store_true', 
      default=False, help='clear the cached windows folder before running')
  parser.add_argument('--feature_type', choices=['sift','dsift'], 
      default='dsift', help='use this feature type')
  parser.add_argument('--kernel', choices=['chi2','rbf'], 
      default='chi2', help='kernel to train svm on')
      
  args = parser.parse_args()
  if args.priors:
    args.priors = args.priors.split(',')
  if args.bounds:
    args.bounds = [float(x) for x in re.findall(r'\d+', args.bounds)]
    assert(len(args.bounds)==2)
  print(args)

  # Load the dataset
  dataset = Dataset('full_pascal_'+args.test_dataset)
  if args.first_n:
    dataset.images = dataset.images[:args.first_n]

  # Infer train_dataset
  if args.test_dataset=='test':
    train_dataset = Dataset('full_pascal_trainval')
  elif args.test_dataset=='val':
    train_dataset = Dataset('full_pascal_train')
  else:
    print("Impossible, setting train_dataset to dataset")
    train_dataset = dataset
  
  # Create window generator
  sw = SlidingWindows(dataset,train_dataset)

  if args.clear_tmp:
    dirname = config.get_sliding_windows_cached_dir(train_dataset.get_name())
    shutil.rmtree(dirname)
    dirname = config.get_sliding_windows_cached_dir(dataset.get_name())
    shutil.rmtree(dirname)

  if args.mode=='assemble_dpm_dets':
    policy = DatasetPolicy(dataset,train_dataset,sw)
    dets = policy.load_ext_detections(dataset,suffix='dpm_may25')

  if args.mode=='assemble_ctf_dets':
    policy = DatasetPolicy(dataset,train_dataset,sw)
    dets = policy.load_ext_detections(dataset,'ctf','ctf_default')
    dets = policy.load_ext_detections(dataset,'ctf','ctf_nohal')
    dets = policy.load_ext_detections(dataset,'ctf', 'ctf_halfsize')

  if args.mode=='evaluate_get_pos_windows':
    evaluate_get_pos_windows(train_dataset)
    return

  if args.mode=='window_stats':
    "Compute and plot the statistics of ground truth window parameters."
    results = SlidingWindows.get_dataset_window_stats(train_dataset,plot=True)

  if args.mode=='ctfdet':
    """Run Pedersoli's detector on the dataset and assemble into one Table."""
    run_pedersoli(dataset)

  if args.mode=='evaluate_jw':
    """
    Evaluate the jumping window approach by producing plots of recall vs.
    #windows.
    """
    # TODO hack: both sw and jw should subclass something like WindowGenerator
    jw = JumpingWindowsDetector(use_scale=True)
    sw.jw = jw
    #classes = dataset.classes
    classes = ['car']
#    classes = ['bicycle' ,'car','horse', 'sofa',\
#               'bird',  'chair',     'motorbike', 'train',\
#               'boat',  'cow',       'person',    'tvmonitor',\
#               'bottle','diningtable',  'pottedplant',\
#               'bus','dog'     ,'sheep']
    for cls_idx in range(comm_rank, len(classes), comm_size):
    #for cls in dataset.classes:
      cls = classes[cls_idx]
      dirname = config.get_jumping_windows_dir(dataset.get_name())
      filename = os.path.join(dirname,'%s'%cls)
      sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)
  
  if args.mode=='evaluate_jw_grid':
    """
    Evaluate the jumping window approach by producing plots of recall vs.
    #windows.
    """
    sw = SlidingWindows(dataset,train_dataset)
    jw = JumpingWindowsDetectorGrid()
    sw.jw = jw
    for cls in dataset.classes:
      dirname = config.get_jumping_windows_dir(dataset.get_name())
      filename = os.path.join(dirname,'%s'%cls)
      if os.path.isfile(config.data_dir + 'JumpingWindows/'+cls):
        sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)

  if args.mode=='train_svm':
    randomize = not os.path.exists('/home/tobibaum')
    
    d = Dataset('full_pascal_train')
    dtest = Dataset('full_pascal_val')  
    e = Extractor()  
    classes = config.pascal_classes  
    num_words = 3000
    iters = 5
    feature_type = 'dsift'
    codebook_samples = 15
    num_pos = 'max'
    testsize = 'max'
    if args.first_n:
      num_pos = args.first_n
      testsize = 1.5*num_pos
     
    kernel = args.kernel
    
    if comm_rank == 0:
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/times/')
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/codebooks/times/')
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/svms/train_times/')
      
    for cls_idx in range(comm_rank, len(classes), comm_size): 
    #for cls in classes:
      cls = classes[cls_idx]
      codebook = e.get_codebook(d, feature_type)
      pos_arr = d.get_pos_windows(cls)
      
      neg_arr = d.get_neg_windows(pos_arr.shape[0], cls, max_overlap=0)
      
      if not num_pos == 'max':
        if not randomize:
          pos_arr = pos_arr[:num_pos]
          neg_arr = neg_arr[:num_pos]  # was pos_arr: truncate negatives, not positives
        else:
          rand = np.random.random_integers(0, pos_arr.shape[0] - 1, size=num_pos)
          pos_arr = pos_arr[rand]
          rand = np.random.random_integers(0, neg_arr.shape[0] - 1, size=num_pos)
          neg_arr = neg_arr[rand]     
      pos_table = Table(pos_arr, ['x','y','w','h','img_ind'])
      neg_table = Table(neg_arr, pos_table.cols)      
      train_with_hard_negatives(d, dtest,  num_words,codebook_samples,codebook,\
                                cls, pos_table, neg_table,feature_type, \
                                iterations=iters, kernel=kernel, L=2, \
                                testsize=testsize,randomize=randomize)

  if args.mode=='evaluate_metaparams':
    """
    Grid search over metaparams values for get_windows_new, with the AUC of
    recall vs. # windows evaluation.
    """
    sw.grid_search_over_metaparams()
    return

  if args.mode=='final_metaparams':
    dirname = config.get_sliding_windows_metaparams_dir(train_dataset.get_name())
    # currently these are the best auc/complexity params
    best_params_for_classes = [
        (62,15,12,'importance',0), #aeroplane
        (83,15,12,'importance',0), #bicycle
        (62,15,12,'importance',0), #bird
        (62,15,12,'importance',0), #boat
        (125,12,12,'importance',0), #bottle
        (83,12,9,'importance',0), #bus
        (125,15,9,'importance',0), #car
        (125,12,12,'linear',0), #cat
        (125,15,9,'importance',0), #chair
        (125,9,6,'importance',0), #cow
        (125,15,6,'linear',0), #diningtable
        (62,15,12,'importance',0), #dog
        (83,15,6,'importance',0), #horse
        (83,12,6,'importance',0), #motorbike
        (83,15,12,'importance',0), #person
        (83,15,6,'importance',0), #pottedplant
        (83,15,12,'importance',0), #sheep
        (83,9,6,'importance',0), #sofa
        (62,12,6,'importance',0), #train
        (62,12,12,'importance',0), #tvmonitor
        (125,9,12,'importance',0) #all
        ]
    # ACTUALLY THEY ARE ALL THE SAME!
    cheap_params = (62, 9, 6, 'importance', 0)
    for i in range(comm_rank,dataset.num_classes(),comm_size):
      cls = dataset.classes[i]
      best_params = best_params_for_classes[i]
      samples, num_scales, num_ratios, mode, priority = best_params
      #samples, num_scales, num_ratios, mode, priority = cheap_params

      metaparams = {
        'samples_per_500px': samples,
        'num_scales': num_scales,
        'num_ratios': num_ratios,
        'mode': mode,
        'priority': 0 }
      filename = '%s_%d_%d_%d_%s_%d'%(
          cls,
          metaparams['samples_per_500px'],
          metaparams['num_scales'],
          metaparams['num_ratios'],
          metaparams['mode'],
          metaparams['priority'])
      filename = os.path.join(dirname,filename)

      tables = sw.evaluate_recall(cls,filename,metaparams,'sw',plot=True,force=False)

      metaparams = {
        'samples_per_500px': samples,
        'num_scales': num_scales,
        'num_ratios': num_ratios,
        'mode': mode,
        'priority': 1 }
      filename = '%s_%d_%d_%d_%s_%d'%(
          cls,
          metaparams['samples_per_500px'],
          metaparams['num_scales'],
          metaparams['num_ratios'],
          metaparams['mode'],
          metaparams['priority'])
      filename = os.path.join(dirname,filename)

      tables = sw.evaluate_recall(cls,filename,metaparams,'sw',plot=True,force=False)
    return

  if args.mode=='extract_sift':
    e=Extractor()
    e.extract_all(['sift'], ['full_pascal_trainval','full_pascal_test'], 0, 0) 
    
  if args.mode=='extract_assignments':
    e=Extractor()
    feature_type = 'sift'
    for image_set in ['full_pascal_trainval','full_pascal_test']:
      d = Dataset(image_set)
      codebook = e.get_codebook(d, feature_type)  
      print('codebook loaded')
      
      for img_ind in range(comm_rank,len(d.images),comm_size):
        img = d.images[img_ind]
      #for img in d.images:
        e.get_assignments(np.array([0,0,img.size[0],img.size[1]]), feature_type, \
                          codebook, img)

  if args.mode=='extract_codebook':
    d = Dataset('full_pascal_trainval')
    e = Extractor()
    codebook = e.get_codebook(d, args.feature_type)
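
`get_codebook` with `kmeansBatch=True` points at mini-batch k-means over local descriptors. A self-contained sketch of building such a codebook with scikit-learn; the random descriptors stand in for real extracted dsift features, and the batch size is an arbitrary assumption:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

def build_codebook_sketch(descriptors, num_words=3000, iterations=8):
    # Cluster descriptors into num_words visual words; the codebook is
    # simply the matrix of cluster centers.
    km = MiniBatchKMeans(n_clusters=num_words, max_iter=iterations,
                         batch_size=1024, n_init=1)
    km.fit(descriptors)
    return km.cluster_centers_  # (num_words, descriptor_dim)

descs = np.random.rand(20000, 128).astype(np.float32)  # fake 128-D SIFT-like data
print(build_codebook_sketch(descs, num_words=64).shape)  # (64, 128)
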