Code Example #1
 def test_learn_weights(self):
   dataset = Dataset('full_pascal_val')
   train_dataset = Dataset('full_pascal_train')
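   # Truncate both datasets to their first 20 images to keep the test fast.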
   dataset.images = dataset.images[:20]
   train_dataset.images = train_dataset.images[:20]
   dp = DatasetPolicy(dataset,train_dataset,self.weights_dataset_name,**self.config)
   weights = dp.learn_weights()
Code Example #2
File: evaluation.py  Project: raldam/timely
    def test(self):
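        # Score every ground-truth detection 1.0: a detector that returns
        # the ground truth verbatim should reach AP = 1.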
        scores = np.ones(self.det_gt.shape[0])
        dets = self.det_gt.append_column('score', scores)

        scores = np.ones(self.d.get_det_gt_for_class('A').shape[0])
        dets_just_A = self.d.get_det_gt_for_class('A')
        dets_just_A = dets_just_A.append_column('score', scores)

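        # With uniform class values each of the 3 classes contributes 1/3 of
        # the weighted mAP, so detecting only class 'A' perfectly yields ~1/3;
        # with inverse-prior values [0.25, 0.25, 0.5], class 'A' counts 0.25.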
        self.d.set_values('uniform')
        assert (np.all(self.d.values == 1. / 3 * np.ones(len(self.classes))))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.33333333333333))

        self.d.set_values('inverse_prior')
        assert (np.all(self.d.values == np.array([0.25, 0.25, 0.5])))
        dp = DatasetPolicy(self.d, self.d, detector='perfect')
        ev = Evaluation(dp)
        ap = ev.compute_det_map(dets, self.det_gt)
        assert (ap == 1)
        ap = ev.compute_det_map(dets_just_A, self.det_gt)
        print(ap)
        assert (ut.fequal(ap, 0.25))
Code Example #3
  def test_load_dpm_detections(self):
    conf = dict(self.config)
    conf['detectors'] = ['dpm']
    policy = DatasetPolicy(self.dataset,self.train_dataset,**conf)
    assert(policy.detectors == ['dpm'])
    dets = policy.load_ext_detections(self.dataset,'dpm_may25',force=True)
    dets = dets.with_column_omitted('time')

    # load the ground truth dets, processed in Matlab
    # (timely/data/test_support/concat_dets.m)
    filename = os.path.join(config.test_support_dir, 'val_dets.mat')
    dets_correct = Table(
        scipy.io.loadmat(filename)['dets'],
        ['x1','y1','x2','y2','dummy','dummy','dummy','dummy','score','cls_ind','img_ind'],
        'dets_correct')
    dets_correct = dets_correct.subset(
        ['x1','y1','x2','y2','score','cls_ind','img_ind'])
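    # The Matlab boxes are 1-indexed (x1,y1,x2,y2) corners: shift to
    # 0-indexed, then convert corners to (x,y,w,h).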
    dets_correct.arr[:,:4] -= 1
    dets_correct.arr[:,:4] = BoundingBox.convert_arr_from_corners(
        dets_correct.arr[:,:4])
    dets_correct.cols = ['x','y','w','h','score','cls_ind','img_ind']
    
    print('----mine:')
    print(dets)
    print('----correct:')
    print(dets_correct)
    assert(dets_correct == dets)
Code Example #4
def train_csc_svms(d_train, d_val, kernel, C):
    # d: trainval
    # d_train: train  |   trainval
    # d_val: val      |   test
    dp = DatasetPolicy(d_train, d_train, detectors=['csc_default'])

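    # Stripe the classes across MPI ranks so each node trains the SVMs for a
    # disjoint subset of classes.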
    for cls_idx in range(mpi.comm_rank, len(d_train.classes), mpi.comm_size):
        cls = d_train.classes[cls_idx]
        ext_detector = dp.actions[cls_idx].obj
        csc = CSCClassifier('default', cls, d_train, d_val)
        csc.train_for_cls(ext_detector, kernel, C)
Code Example #5
 def __init__(self):
   self.dataset = Dataset('test_pascal_val')
   self.train_dataset = Dataset('test_pascal_train')
   self.weights_dataset_name = 'test_pascal_val'
   self.config = {
     'suffix': 'default',
     'detectors': ['perfect'], # perfect,perfect_with_noise,dpm,csc_default,csc_half
     'policy_mode': 'random',
     'bounds': None,
     'weights_mode': 'manual_1' # manual_1, manual_2, manual_3, greedy, rl
   }
   self.dp = DatasetPolicy(
     self.dataset,self.train_dataset,self.weights_dataset_name,**self.config)
Code Example #6
def run():
  dataset = Dataset('full_pascal_test')
  train_dataset = Dataset('full_pascal_trainval')
  cls = 'dog'
  rtype = '1big_2small'
  args = 0.5
  detector = 'csc_default'
  from synthetic.dataset_policy import DatasetPolicy
  all_dets = DatasetPolicy.load_ext_detections(dataset, detector)
  cls_ind = dataset.get_ind(cls)
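  # Keep only this class's detections; omit=True presumably drops the
  # cls_ind column from the returned table.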
  dets = all_dets.filter_on_column('cls_ind',cls_ind,omit=True)  
  ext_det = ExternalDetectorRegions(dataset, train_dataset, cls, dets, detector, rtype, args)
  img = dataset.images[13]  # Just some random image...where did the get_image_by_name go?
  print img.size
  print ext_det.detect(img, 0)
  print ext_det.detect(img, 1)
  print ext_det.detect(img, 2)
Code Example #7
def test_csc_svm(d_train, d_val):
    dp = DatasetPolicy(d_val, d_train, detectors=['csc_default'])

    table = np.zeros((len(d_val.images), len(d_val.classes)))
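    # One row per image, one column per class: each MPI rank fills its own
    # stripe of columns, and rank 0 sums the stripes via reduce below.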
    for cls_idx in range(mpi.comm_rank, len(d_val.classes), mpi.comm_size):
        cls = d_val.classes[cls_idx]
        ext_detector = dp.actions[cls_idx].obj
        # Load the classifier we trained in train_csc_svms
        csc = CSCClassifier('default', cls, d_train, d_val)
        table[:, cls_idx] = csc.eval_cls(ext_detector)

    print '%d is at safebarrier' % mpi.comm_rank
    safebarrier(comm)

    print 'passed safebarrier'
    table = comm.reduce(table, op=MPI.SUM, root=0)
    if mpi.comm_rank == 0:
        print 'save table'
        print table
        cPickle.dump(table, open('table', 'wb'))  # binary mode for pickle
        print 'saved'
    return table
Code Example #8
    def test_filter_dets(self):
        dataset = Dataset("full_pascal_test")
        train_dataset = Dataset("full_pascal_trainval")
        cls = "dog"
        rtype = "scale_location"
        args = 0.5
        detector = "csc_default"
        all_dets = DatasetPolicy.load_ext_detections(dataset, detector)
        cls_ind = dataset.get_ind(cls)
        dets = all_dets.filter_on_column("cls_ind", cls_ind, omit=True)
        ext_det = ExternalDetectorRegions(dataset, train_dataset, cls, dets, detector, rtype, args)
        img = dataset.images[133]  # Just some random image...where did the get_image_by_name go?
        split_x = img.size[0] / 2
        split_scale = img.size[0] * args
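        # Region indices (per the asserts below): 0 = left/small,
        # 1 = right/small, 2 = left/big, 3 = right/big; x splits at half the
        # image width, scale splits at args * image width.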
        dets = ext_det.detect(img, 0)
        for det in dets[0]:  # left, small
            assert det[0] < split_x
            assert det[2] < split_scale
        dets = ext_det.detect(img, 1)
        for det in dets[0]:  # right, small
            assert det[0] >= split_x
            assert det[2] < split_scale
        dets = ext_det.detect(img, 2)
        for det in dets[0]:  # left, big
            assert det[0] < split_x
            assert det[2] >= split_scale
        dets = ext_det.detect(img, 3)
        for det in dets[0]:  # right, big
            assert det[0] >= split_x
            assert det[2] >= split_scale

        # and those we just run to check that nothing is syntactically going wrong
        # (no errors/...)
        ext_det.compute_score(img, 0)
        ext_det.compute_score(img, 1)
        ext_det.compute_score(img, 2)
        ext_det.compute_score(img, 3)
Code Example #9
class TestDatasetPolicy:
  def __init__(self):
    self.dataset = Dataset('test_pascal_val')
    self.train_dataset = Dataset('test_pascal_train')
    self.weights_dataset_name = 'test_pascal_val'
    self.config = {
      'suffix': 'default',
      'detectors': ['perfect'], # perfect,perfect_with_noise,dpm,csc_default,csc_half
      'policy_mode': 'random',
      'bounds': None,
      'weights_mode': 'manual_1' # manual_1, manual_2, manual_3, greedy, rl
    }
    self.dp = DatasetPolicy(
      self.dataset,self.train_dataset,self.weights_dataset_name,**self.config)

  def test_run_on_dataset(self):
    # run on test dataset
    dets,clses,samples = self.dp.run_on_dataset(force=True) 
    assert(len(samples) == clses.shape[0])
    assert(len(samples) == self.dp.dataset.num_images()*len(self.dp.actions))
    train_dets,train_clses,train_samples = self.dp.run_on_dataset(train=True,force=True)
    assert(len(train_samples) == train_clses.shape[0])
    assert(len(train_samples) == self.dp.train_dataset.num_images()*len(self.dp.actions))

  def test_unique_samples(self):
    "Test the correctness of making a list of samples unique."
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    new_sample = copy.deepcopy(samples[11])
    new_sample2 = copy.deepcopy(samples[11])
    new_sample2.dt = -40 # an unreasonable value
    assert(new_sample in samples)
    assert(new_sample2 not in samples)

  def test_output_det_statistics(self):
    self.dp.output_det_statistics()

  def test_learn_weights(self):
    dataset = Dataset('full_pascal_val')
    train_dataset = Dataset('full_pascal_train')
    dataset.images = dataset.images[:20]
    train_dataset.images = train_dataset.images[:20]
    dp = DatasetPolicy(dataset,train_dataset,self.weights_dataset_name,**self.config)
    weights = dp.learn_weights()

  def test_regress(self):
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)
    samples += samples
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)
    samples += samples
    weights,error = self.dp.regress(samples,'greedy')
    print "Weights after %d samples:\n %s"%(len(samples),weights)
    print "Error after %d samples: %s"%(len(samples),error)

  def test_load_weights(self):
    modes = ['manual_1','manual_2','manual_3']
    for mode in modes:
      print "%s weights:"%mode
      self.dp.weights_mode=mode
      self.dp.load_weights()
      print self.dp.get_reshaped_weights()
      assert(self.dp.weights.shape[0] == len(self.dp.actions)*BeliefState.num_features)
      self.dp.write_weights_image('temp_weights_%s.png'%mode)

  def test_perfect_detector(self):
    dets,clses,samples = self.dp.run_on_dataset(force=True)
    #embed()
    dets = dets.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])
    gt = self.dataset.get_det_gt()
    gt = gt.subset(['x', 'y', 'w', 'h', 'cls_ind', 'img_ind'])

    # TODO: does this make sense?
    dets.sort_by_column('x')
    gt.sort_by_column('x')
    print dets
    print gt
    assert(dets == gt)

  def test_load_dpm_detections(self):
    conf = dict(self.config)
    conf['detectors'] = ['dpm']
    policy = DatasetPolicy(self.dataset,self.train_dataset,**conf)
    assert(policy.detectors == ['dpm'])
    dets = policy.load_ext_detections(self.dataset,'dpm_may25',force=True)
    dets = dets.with_column_omitted('time')

    # load the ground truth dets, processed in Matlab
    # (timely/data/test_support/concat_dets.m)
    filename = os.path.join(config.test_support_dir, 'val_dets.mat')
    dets_correct = Table(
        scipy.io.loadmat(filename)['dets'],
        ['x1','y1','x2','y2','dummy','dummy','dummy','dummy','score','cls_ind','img_ind'],
        'dets_correct')
    dets_correct = dets_correct.subset(
        ['x1','y1','x2','y2','score','cls_ind','img_ind'])
    dets_correct.arr[:,:4] -= 1
    dets_correct.arr[:,:4] = BoundingBox.convert_arr_from_corners(
        dets_correct.arr[:,:4])
    dets_correct.cols = ['x','y','w','h','score','cls_ind','img_ind']
    
    print('----mine:')
    print(dets)
    print('----correct:')
    print(dets_correct)
    assert(dets_correct == dets)
Code Example #10
def run_fastinf_different_settings(d, ms, rs, suffixs, num_bins = 5):
  
  settings = list(itertools.product(suffixs, ms, rs))
  table_gt = d.get_cls_ground_truth().arr.astype(int)
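  # Image-by-class ground-truth table (0/1 per class); each setting below
  # pairs it with a discretized observation table before writing the MRF.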
  print 'run with a total of %d settings'%len(settings)
  
  for setindx in range(mpi.comm_rank, len(settings), mpi.comm_size):
    second_table = None
    setin = settings[setindx]
    suffix = setin[0]
    m = str(setin[1])
    r1 = str(setin[2])
    
    print 'node %d runs %s, m=%s, r1=%s'%(mpi.comm_rank, suffix, m, r1)

    filename = config.get_fastinf_mrf_file(d, suffix)
    data_filename = config.get_fastinf_data_file(d, suffix)
    
    if suffix == 'perfect':      
      table = np.hstack((table_gt, table_gt))
      bounds = np.tile(np.linspace(0, 1, num_bins+1),(table_gt.shape[1],1))
      print bounds
      
    elif suffix == 'GIST':
      table = gist_classify_dataset(d)   
      bounds, discr_table = discretize_table(table, num_bins)  
      table = np.hstack((table_gt, discr_table))
      
    elif suffix == 'CSC':
      filename_csc = os.path.join(config.get_ext_dets_foldname(d),'table')
      print filename_csc
      if not os.path.exists(filename_csc):
        raise RuntimeWarning('The csc classification could not be loaded from %s'%filename_csc)
      orig_table = cPickle.load(open(filename_csc,'rb'))
      if isinstance(orig_table, Table):
        orig_table = orig_table.arr[:,:-1]
      bounds, discr_table = discretize_table(orig_table, num_bins)
      table = np.hstack((table_gt, discr_table))
      
    elif suffix == 'CSC_regions':
      rm = RegionModel("1big_2small", 0.5)
      detector = 'csc_default'
      from synthetic.dataset_policy import DatasetPolicy
      orig_table = DatasetPolicy.load_ext_detections(d, detector)            
      gt = d.get_det_gt().copy()
      # append a score of 1 to every ground-truth row so gt and detections share the same columns
      gt.cols.append('score')
      gt.arr = np.hstack((gt.arr, np.ones((gt.shape[0], 1))))  
      table_gt_region = create_regioned_table(rm, gt, d.images, len(d.classes))
      # At this point we need to split them for the different regions
      orig_table_region = create_regioned_table(rm, orig_table, d.images, len(d.classes))
      
      bounds, discr_table_region = discretize_table(orig_table_region, num_bins)
      table = np.hstack((table_gt_region, discr_table_region))
      
    elif suffix == 'GIST_CSC':
      filename_csc = os.path.join(config.get_ext_dets_foldname(d),'table')
      if not os.path.exists(filename_csc):
        raise RuntimeWarning('The csc classification could not be loaded from %s'%filename_csc)
      orig_table = cPickle.load(open(filename_csc,'rb'))
      if isinstance(orig_table, Table):
        orig_table = orig_table.arr[:,:-1]
      bounds, discr_table = discretize_table(orig_table, num_bins)      
      table = np.hstack((table_gt, discr_table))
      store_bound(d, 'CSC', bounds)
      
      second_table = gist_classify_dataset(d)      
      sec_bounds, second_table = discretize_table(second_table, num_bins)      
      store_bound(d, 'GIST', sec_bounds)
      
      full_bound = np.hstack((sec_bounds, bounds))
      store_bound(d, 'GIST_CSC', full_bound)
    
    if suffix != 'GIST_CSC':
      store_bound(d, suffix, bounds)
    
    print 'set up table on %d, write out mrf for %s, m=%s, r1=%s'%(mpi.comm_rank, suffix, m, r1)   
      
    write_out_mrf(table, num_bins, filename, data_filename, second_table=second_table)
    
    add_sets = ['-m',m]
    if r1 != '':
      add_sets += ['-r1', r1]
          
    if second_table is not None:
      sec_bound_file = '%s_secbounds'%filename
      for s in add_sets:
        sec_bound_file += '_'+s
      np.savetxt(sec_bound_file, sec_bounds)
      
    print '%d start running lbp for %s, m=%s, r1=%s'%(mpi.comm_rank, suffix, m, r1)
    
    filename_out = config.get_fastinf_res_file(d, suffix, m, r1)
    execute_lbp(filename, data_filename, filename_out, add_settings=add_sets)
Code Example #11
File: evaluation.py  Project: raldam/timely
 def setup(self):
     train_dataset = Dataset('test_pascal_train', force=True)
     dataset = Dataset('test_pascal_val', force=True)
     self.dp = DatasetPolicy(dataset, train_dataset, detector='perfect')
     self.evaluation = Evaluation(self.dp)
Code Example #12
def main():
  parser = argparse.ArgumentParser(description='Execute different functions of our system')
  parser.add_argument('mode',
    choices=[
      'window_stats', 'evaluate_metaparams', 'evaluate_jw',
      'evaluate_get_pos_windows', 'train_svm',
      'extract_sift','extract_assignments','extract_codebook',
      'evaluate_jw_grid', 'final_metaparams',
      'assemble_dpm_dets','ctfdet','assemble_ctf_dets'
      ])
  parser.add_argument('--test_dataset', choices=['val','test','train'],
      default='test', help='dataset to use for testing. the training dataset \
      is automatically inferred (val->train and test->trainval).')
  parser.add_argument('--first_n', type=int,
      help='only take the first N images in the datasets')
  parser.add_argument('--bounds', type=str,
      help='the start_time and deadline_time for the ImagePolicy and corresponding evaluation. ex: (1,5)')
  parser.add_argument('--name', help='name for this run')
  parser.add_argument('--priors', default='random', help= \
      "list of choice for the policy for selecting the next action. choose from random, oracle,fixed_order, no_smooth, backoff. ex: --priors=random,oracle,no_smooth")
  parser.add_argument('--compare_evals', action='store_true',
      default=False, help='plot all the priors modes given on same plot')
  parser.add_argument('--detector', choices=['perfect','perfect_with_noise', 'dpm','ctf'],
      default='perfect', help='detector type')
  parser.add_argument('--force', action='store_true',
      default=False, help='force overwrite')
  parser.add_argument('--gist', action='store_true',
      default=False, help='use GIST as one of the actions')
  parser.add_argument('--clear_tmp', action='store_true',
      default=False, help='clear the cached windows folder before running')
  parser.add_argument('--feature_type', choices=['sift','dsift'],
      default='dsift', help='use this feature type')
  parser.add_argument('--kernel', choices=['chi2','rbf'],
      default='chi2', help='kernel to train svm on')

  args = parser.parse_args()
  if args.priors:
    args.priors = args.priors.split(',')
  if args.bounds:
    args.bounds = [float(x) for x in re.findall(r'\d+', args.bounds)]
    assert(len(args.bounds)==2)
  print(args)

  # Load the dataset
  dataset = Dataset('full_pascal_'+args.test_dataset)
  if args.first_n:
    dataset.images = dataset.images[:args.first_n]

  # Infer train_dataset
  if args.test_dataset=='test':
    train_dataset = Dataset('full_pascal_trainval')
  elif args.test_dataset=='val':
    train_dataset = Dataset('full_pascal_train')
  else:
    print("Impossible, setting train_dataset to dataset")
    train_dataset = dataset
  
  # Create window generator
  sw = SlidingWindows(dataset,train_dataset)

  if args.clear_tmp:
    dirname = config.get_sliding_windows_cached_dir(train_dataset.get_name())
    shutil.rmtree(dirname)
    dirname = config.get_sliding_windows_cached_dir(dataset.get_name())
    shutil.rmtree(dirname)

  if args.mode=='assemble_dpm_dets':
    policy = DatasetPolicy(dataset,train_dataset,sw)
    dets = policy.load_ext_detections(dataset,suffix='dpm_may25')

  if args.mode=='assemble_ctf_dets':
    policy = DatasetPolicy(dataset,train_dataset,sw)
    dets = policy.load_ext_detections(dataset,'ctf','ctf_default')
    dets = policy.load_ext_detections(dataset,'ctf','ctf_nohal')
    dets = policy.load_ext_detections(dataset,'ctf', 'ctf_halfsize')

  if args.mode=='evaluate_get_pos_windows':
    evaluate_get_pos_windows(train_dataset)
    return

  if args.mode=='window_stats':
    "Compute and plot the statistics of ground truth window parameters."
    results = SlidingWindows.get_dataset_window_stats(train_dataset,plot=True)

  if args.mode=='ctfdet':
    """Run Pedersoli's detector on the dataset and assemble into one Table."""
    run_pedersoli(dataset)

  if args.mode=='evaluate_jw':
    """
    Evaluate the jumping window approach by producing plots of recall vs.
    #windows.
    """
    # TODO hack: both sw and jw should subclass something like WindowGenerator
    jw = JumpingWindowsDetector(use_scale=True)
    sw.jw = jw
    #classes = dataset.classes
    classes = ['car']
#    classes = ['bicycle' ,'car','horse', 'sofa',\
#               'bird',  'chair',     'motorbike', 'train',\
#               'boat',  'cow',       'person',    'tvmonitor',\
#               'bottle','diningtable',  'pottedplant',\
#               'bus','dog'     ,'sheep']
    for cls_idx in range(comm_rank, len(classes), comm_size):
    #for cls in dataset.classes:
      cls = classes[cls_idx]
      dirname = config.get_jumping_windows_dir(dataset.get_name())
      filename = os.path.join(dirname,'%s'%cls)
      sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)
  
  if args.mode=='evaluate_jw_grid':
    """
    Evaluate the jumping window approach by producing plots of recall vs.
    #windows.
    """
    sw = SlidingWindows(dataset,train_dataset)
    jw = JumpingWindowsDetectorGrid()
    sw.jw = jw
    for cls in dataset.classes:
      dirname = config.get_jumping_windows_dir(dataset.get_name())
      filename = os.path.join(dirname,'%s'%cls)
      if os.path.isfile(config.data_dir + 'JumpingWindows/'+cls):
        sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)

  if args.mode=='train_svm':
    randomize = not os.path.exists('/home/tobibaum')
    
    d = Dataset('full_pascal_train')
    dtest = Dataset('full_pascal_val')  
    e = Extractor()  
    classes = config.pascal_classes  
    num_words = 3000
    iters = 5
    feature_type = 'dsift'
    codebook_samples = 15
    num_pos = 'max'
    testsize = 'max'
    if args.first_n:
      num_pos = args.first_n
      testsize = 1.5*num_pos
     
    kernel = args.kernel
    
    if comm_rank == 0:
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/times/')
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/codebooks/times/')
      ut.makedirs(config.data_dir + 'features/' + feature_type + '/svms/train_times/')
      
    for cls_idx in range(comm_rank, len(classes), comm_size): 
    #for cls in classes:
      cls = classes[cls_idx]
      codebook = e.get_codebook(d, feature_type)
      pos_arr = d.get_pos_windows(cls)
      
      neg_arr = d.get_neg_windows(pos_arr.shape[0], cls, max_overlap=0)
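      # Draw as many negative windows as positives, disallowing any overlap
      # with ground truth (max_overlap=0).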
      
      if num_pos != 'max':
        if not randomize:
          pos_arr = pos_arr[:num_pos]
          neg_arr = neg_arr[:num_pos]
        else:
          rand = np.random.random_integers(0, pos_arr.shape[0] - 1, size=num_pos)
          pos_arr = pos_arr[rand]
          rand = np.random.random_integers(0, neg_arr.shape[0] - 1, size=num_pos)
          neg_arr = neg_arr[rand]     
      pos_table = Table(pos_arr, ['x','y','w','h','img_ind'])
      neg_table = Table(neg_arr, pos_table.cols)      
      train_with_hard_negatives(d, dtest,  num_words,codebook_samples,codebook,\
                                cls, pos_table, neg_table,feature_type, \
                                iterations=iters, kernel=kernel, L=2, \
                                testsize=testsize,randomize=randomize)

  if args.mode=='evaluate_metaparams':
    """
    Grid search over metaparams values for get_windows_new, with the AUC of
    recall vs. # windows evaluation.
    """
    sw.grid_search_over_metaparams()
    return

  if args.mode=='final_metaparams':
    dirname = config.get_sliding_windows_metaparams_dir(train_dataset.get_name())
    # currently these are the best auc/complexity params
    best_params_for_classes = [
        (62,15,12,'importance',0), #aeroplane
        (83,15,12,'importance',0), #bicycle
        (62,15,12,'importance',0), #bird
        (62,15,12,'importance',0), #boat
        (125,12,12,'importance',0), #bottle
        (83,12,9,'importance',0), #bus
        (125,15,9,'importance',0), #car
        (125,12,12,'linear',0), #cat
        (125,15,9,'importance',0), #chair
        (125,9,6,'importance',0), #cow
        (125,15,6,'linear',0), #diningtable
        (62,15,12,'importance',0), #dog
        (83,15,6,'importance',0), #horse
        (83,12,6,'importance',0), #motorbike
        (83,15,12,'importance',0), #person
        (83,15,6,'importance',0), #pottedplant
        (83,15,12,'importance',0), #sheep
        (83,9,6,'importance',0), #sofa
        (62,12,6,'importance',0), #train
        (62,12,12,'importance',0), #tvmonitor
        (125,9,12,'importance',0) #all
        ]
    # ACTUALLY THEY ARE ALL THE SAME!
    cheap_params = (62, 9, 6, 'importance', 0)
    for i in range(comm_rank,dataset.num_classes(),comm_size):
      cls = dataset.classes[i]
      best_params = best_params_for_classes[i]
      samples, num_scales, num_ratios, mode, priority = best_params
      # (cheap_params has the same 5-element layout, if a single shared
      # setting is wanted instead)

      metaparams = {
        'samples_per_500px': samples,
        'num_scales': num_scales,
        'num_ratios': num_ratios,
        'mode': mode,
        'priority': 0 }
      filename = '%s_%d_%d_%d_%s_%d'%(
          cls,
          metaparams['samples_per_500px'],
          metaparams['num_scales'],
          metaparams['num_ratios'],
          metaparams['mode'],
          metaparams['priority'])
      filename = os.path.join(dirname,filename)

      tables = sw.evaluate_recall(cls,filename,metaparams,'sw',plot=True,force=False)

      metaparams = {
        'samples_per_500px': samples,
        'num_scales': num_scales,
        'num_ratios': num_ratios,
        'mode': mode,
        'priority': 1 }
      filename = '%s_%d_%d_%d_%s_%d'%(
          cls,
          metaparams['samples_per_500px'],
          metaparams['num_scales'],
          metaparams['num_ratios'],
          metaparams['mode'],
          metaparams['priority'])
      filename = os.path.join(dirname,filename)

      tables = sw.evaluate_recall(cls,filename,metaparams,'sw',plot=True,force=False)
    return

  if args.mode=='extract_sift':
    e=Extractor()
    e.extract_all(['sift'], ['full_pascal_trainval','full_pascal_test'], 0, 0) 
    
  if args.mode=='extract_assignments':
    e=Extractor()
    feature_type = 'sift'
    for image_set in ['full_pascal_trainval','full_pascal_test']:
      d = Dataset(image_set)
      codebook = e.get_codebook(d, feature_type)  
      print 'codebook loaded'
      
      for img_ind in range(comm_rank,len(d.images),comm_size):
        img = d.images[img_ind]
      #for img in d.images:
        e.get_assignments(np.array([0,0,img.size[0],img.size[1]]), feature_type, \
                          codebook, img)

  if args.mode=='extract_codebook':
    d = Dataset('full_pascal_trainval')
    e = Extractor()
    codebook = e.get_codebook(d, args.feature_type)
Code Example #13
File: run_experiment.py  Project: raldam/timely
def main():
  parser = argparse.ArgumentParser(
    description="Run experiments with the timely detection system.")

  parser.add_argument('--test_dataset',
    choices=['val','test','trainval'],
    default='val',
    help="""Dataset to use for testing. Run on val until final runs.
    The training dataset is inferred (val->train; test->trainval; trainval->trainval).""")

  parser.add_argument('--first_n', type=int,
    help='only take the first N images in the test dataset')

  parser.add_argument('--first_n_train', type=int,
    help='only take the first N images in the train dataset')

  parser.add_argument('--config',
    help="""Config file name that specifies the experiments to run.
    Give name such that the file is configs/#{name}.json or configs/#{name}/
    In the latter case, all files within the directory will be loaded.""")

  parser.add_argument('--suffix',
    help="Overwrites the suffix in the config(s).")

  parser.add_argument('--bounds10', action='store_true', 
    default=False, help='set bounds to [0,10]')
  
  parser.add_argument('--bounds515', action='store_true', 
    default=False, help='set bounds to [5,15]')

  parser.add_argument('--force', action='store_true', 
    default=False, help='force overwrite')

  parser.add_argument('--wholeset_prs', action='store_true', 
    default=False, help='evaluate in the final p-r regime')

  parser.add_argument('--no_apvst', action='store_true', 
    default=False, help='do NOT evaluate in the ap vs. time regime')

  parser.add_argument('--det_configs', action='store_true', 
    default=False, help='output detector statistics to det_configs')

  parser.add_argument('--inverse_prior', action='store_true', 
    default=False, help='use inverse prior class values')

  args = parser.parse_args()
  print(args)

  # If config file is not given, just run one experiment using default config
  if not args.config:
    configs = [DatasetPolicy.default_config]
  else:
    configs = load_configs(args.config)

  # Load the dataset
  dataset = Dataset('full_pascal_'+args.test_dataset)
  if args.first_n:
    dataset.images = dataset.images[:args.first_n]

  # Infer train_dataset
  if args.test_dataset=='test':
    train_dataset = Dataset('full_pascal_trainval')
  elif args.test_dataset=='val':
    train_dataset = Dataset('full_pascal_train')
  elif args.test_dataset=='trainval':
    train_dataset = Dataset('full_pascal_trainval')
  else:
    pass # impossible by argparse settings
  
  # Only need to set training dataset values; evaluation gets it from there
  if args.inverse_prior:
    train_dataset.set_values('inverse_prior')

  # TODO: hack
  if args.first_n_train:
    train_dataset.images = train_dataset.images[:args.first_n_train]

  # In both the above cases, we use the val dataset for weights
  weights_dataset_name = 'full_pascal_val'

  dets_tables = []
  dets_tables_whole = []
  clses_tables_whole = []
  all_bounds = []
      
  plot_infos = [] 
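  # Run each experiment configuration through a DatasetPolicy + Evaluation
  # pass, collecting per-config results for the combined plots below.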
  for config_f in configs:
    if args.suffix:
      config_f['suffix'] = args.suffix
    if args.bounds10:
      config_f['bounds'] = [0,10]
    if args.bounds515:
      config_f['bounds'] = [5,15]
    assert(not (args.bounds10 and args.bounds515))
    if args.inverse_prior:
      config_f['suffix'] += '_inverse_prior'
      config_f['values'] = 'inverse_prior'

    dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f)
    ev = Evaluation(dp)
    all_bounds.append(dp.bounds)
    plot_infos.append(dict((k,config_f[k]) for k in ('label','line','color') if k in config_f))
    # output the det configs first
    if args.det_configs:
      dp.output_det_statistics()

    # evaluate in the AP vs. Time regime, unless told not to
    if not args.no_apvst:
      dets_table = ev.evaluate_vs_t(None,None,force=args.force)
      #dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force)
      if comm_rank==0:
        dets_tables.append(dets_table)
        #dets_tables_whole.append(dets_table_whole)
        #clses_tables_whole.append(clses_table_whole)

    # optionally, evaluate in the standard PR regime
    if args.wholeset_prs:
      ev.evaluate_detections_whole(None,force=args.force)

  # and plot the comparison if multiple config files were given
  if not args.no_apvst and len(configs)>1 and comm_rank==0:
    # filename of the final plot is the config file name
    dirname = config.get_evals_dir(dataset.get_name())
    filename = args.config
    if args.inverse_prior:
      filename += '_inverse_prior'
    
    # det avg
    ff = opjoin(dirname, '%s_det_avg.png'%filename)
    ff_nl = opjoin(dirname, '%s_det_avg_nl.png'%filename)

    # make sure directory exists
    ut.makedirs(os.path.dirname(ff))
    
    Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
    Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

    if False:
      # det whole
      ff = opjoin(dirname, '%s_det_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_det_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)

      # cls whole
      ff = opjoin(dirname, '%s_cls_whole.png'%filename)
      ff_nl = opjoin(dirname, '%s_cls_whole_nl.png'%filename)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos)
      Evaluation.plot_ap_vs_t(clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)
Code Example #14
 def setup(self):
     d = Dataset('test_pascal_trainval', force=True)
     d2 = Dataset('test_pascal_test', force=True)
     config = {'detectors': ['csc_default']}
     self.dp = DatasetPolicy(d, d2, **config)
     self.bs = BeliefState(d, self.dp.actions)