Example #1
0
File: dataset.py  Project: raldam/timely
class TestDatasetJson:
    """Exercises Dataset loading, ground-truth tables, and class values."""

    def setup(self):
        # A tiny 4-image, 3-class fixture; force=True rebuilds cached state.
        self.d = Dataset('test_data1', force=True)
        self.classes = ["A", "B", "C"]

    def test_load(self):
        """Dataset reports the fixture's image count and class list."""
        assert self.d.num_images() == 4
        assert self.d.classes == self.classes

    def test_get_det_gt(self):
        """Full detection ground truth matches the expected Table."""
        gt = self.d.get_det_gt(with_diff=True, with_trun=False)
        rows = [
            [0., 0., 0., 0., 0., 0, 0, 0.],
            [1., 1., 1., 1., 1., 0, 0, 0.],
            [1., 1., 1., 0., 0., 0, 0, 1.],
            [0., 0., 0., 0., 1., 0, 0, 2.],
            [0., 0., 0., 0., 2., 0, 0, 3.],
            [1., 1., 1., 1., 2., 0, 0, 3.],
        ]
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']
        expected = Table(np.array(rows), cols)
        print(gt)
        print(expected)
        assert gt == expected

    def test_get_cls_counts_json(self):
        """Per-image class counts: rows are images, columns are classes."""
        expected = np.array([
            [1, 1, 0],
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 2],
        ])
        print(self.d.get_cls_counts())
        assert np.all(self.d.get_cls_counts() == expected)

    def test_get_cls_ground_truth_json(self):
        """Boolean class-presence table per image."""
        presence = np.array([
            [True, True, False],
            [True, False, False],
            [False, True, False],
            [False, False, True],
        ])
        expected = Table(presence, ["A", "B", "C"])
        assert self.d.get_cls_ground_truth() == expected

    def test_det_ground_truth_for_class_json(self):
        """Class-"A" detections are the same with and without diff/trun."""
        expected = np.array([[0., 0., 0., 0., 0., 0., 0, 0.],
                             [1., 1., 1., 0., 0., 0., 0., 1.]])
        expected_cols = ['x', 'y', 'w', 'h', 'cls_ind', 'diff', 'trun', 'img_ind']

        gt = self.d.get_det_gt_for_class("A", with_diff=True, with_trun=True)
        print(gt.arr)
        assert np.all(gt.arr == expected)
        assert gt.cols == expected_cols

        # no diff or trun: same two boxes for this fixture
        gt = self.d.get_det_gt_for_class("A", with_diff=False, with_trun=False)
        print(gt.arr)
        assert np.all(gt.arr == expected)
        assert gt.cols == expected_cols

    def test_set_values(self):
        """Values default to uniform; 'inverse_prior' reweights by rarity."""
        uniform = 1 / 3. * np.ones(len(self.classes))
        assert np.all(self.d.values == uniform)
        self.d.set_values('uniform')
        assert np.all(self.d.values == uniform)
        self.d.set_values('inverse_prior')
        print(self.d.values)
        assert np.all(self.d.values == np.array([0.25, 0.25, 0.5]))
Example #2
0
def compute_error_vs_iterations(suffix, num_images, dataset):
    # assemble truth

    d = Dataset(dataset)
    truth = d.get_cls_ground_truth().arr
    truth = np.random.permutation(truth)[:num_images, :]
    num_classes = truth.shape[1]
    tt = ut.TicToc()

    lbp_times = [0] + [10**x for x in range(3)]
    lbp_times += [1000 + 1000 * x for x in range(10)]
    lbp_times += [10**x for x in [5]]
    #lbp_times = [3000]

    all_scores = np.zeros((num_classes, len(lbp_times), num_classes))
    all_times = np.zeros((num_classes, len(lbp_times)))

    counter = 0
    # do inference
    for itdex in range(len(lbp_times)):
        fm = FastinfModel(d, 'perfect', num_classes, lbp_time=lbp_times[itdex])
        for rowdex in range(comm_rank, truth.shape[0], comm_size):  # parallel
            obs = truth[rowdex, :].astype(int)
            taken = np.zeros(num_classes).astype(int)

            for num_obser in range(num_classes):
                counter += 1
                taken[np.argmax(fm.p_c - taken)] = 1

                tt.tic()
                fm.update_with_observations(taken, obs)
                utime = tt.toc(quiet=True)
                curr_score = compute_sq_error(obs, fm.p_c)
                all_scores[num_obser,
                           itdex, :] = np.add(all_scores[num_obser, itdex, :],
                                              curr_score)

                all_times[num_obser, itdex] += utime
                print '%d is at %d / %d :'%(comm_rank, counter, len(lbp_times)* \
                                               num_classes*num_images/float(comm_size)),curr_score

    all_scores /= num_images
    all_times /= num_images

    safebarrier(comm)
    all_scores = comm.reduce(all_scores)
    all_times = comm.reduce(all_times)
    if comm_rank == 0:  #parallel
        outfile = open('all_scores_' + suffix, 'w')
        cPickle.dump(all_scores, outfile)
        outfile.close()
        outfile = open('all_times_' + suffix, 'w')
        cPickle.dump(all_times, outfile)
        outfile.close()
Example #3
0
def compute_error_vs_iterations(suffix, num_images, dataset):
  # assemble truth
  
  d = Dataset(dataset)
  truth = d.get_cls_ground_truth().arr
  truth = np.random.permutation(truth)[:num_images,:]  
  num_classes = truth.shape[1]  
  tt = ut.TicToc()
  
  lbp_times = [0] + [10**x for x in range(3)]
  lbp_times += [1000+1000*x for x in range(10)]
  lbp_times += [10**x for x in [5]]  
  #lbp_times = [3000]

  all_scores = np.zeros((num_classes, len(lbp_times), num_classes))
  all_times = np.zeros((num_classes, len(lbp_times)))
      
  counter = 0
  # do inference
  for itdex in range(len(lbp_times)):
    fm = FastinfModel(d, 'perfect', num_classes, lbp_time=lbp_times[itdex])
    for rowdex in range(comm_rank, truth.shape[0], comm_size): # parallel
      obs = truth[rowdex,:].astype(int)
      taken = np.zeros(num_classes).astype(int)
      
      for num_obser in range(num_classes):            
        counter += 1        
        taken[np.argmax(fm.p_c-taken)] = 1
        
        tt.tic()
        fm.update_with_observations(taken, obs)
        utime = tt.toc(quiet=True)     
        curr_score = compute_sq_error(obs, fm.p_c) 
        all_scores[num_obser, itdex, :] = np.add(all_scores[num_obser, itdex, :], curr_score)
          
        all_times[num_obser, itdex] += utime
        print '%d is at %d / %d :'%(comm_rank, counter, len(lbp_times)* \
                                       num_classes*num_images/float(comm_size)),curr_score
    
  all_scores /= num_images
  all_times /= num_images        
    
  safebarrier(comm)
  all_scores = comm.reduce(all_scores)
  all_times = comm.reduce(all_times)
  if comm_rank == 0: #parallel
    outfile = open('all_scores_'+suffix,'w')
    cPickle.dump(all_scores,outfile)
    outfile.close()
    outfile = open('all_times_'+suffix,'w')
    cPickle.dump(all_times,outfile)
    outfile.close()    
Example #4
0
class TestEvaluationPerfect:
  def __init__(self):
    self.csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_trainval'), 'r'))
    self.csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'csc_test'), 'r'))
    self.ext_csc_test = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_test'), 'r'))
    self.ext_csc_trainval = cPickle.load(open(os.path.join(config.get_ext_test_support_dir(), 'ext_csc_trainval'), 'r'))
    self.d_train = Dataset('full_pascal_trainval')
    self.trainval_gt = self.d_train.get_cls_ground_truth()
    self.d_test = Dataset('full_pascal_test')
    self.test_gt = self.d_test.get_cls_ground_truth()
  
  def setup(self):
    train_dataset = Dataset('test_pascal_train',force=True)
    dataset = Dataset('test_pascal_val',force=True)
    self.dp = DatasetPolicy(dataset,train_dataset,detector='perfect')
    self.evaluation = Evaluation(self.dp)    

  def test_compute_pr_multiclass(self):
    cols = ['x','y','w','h','cls_ind','img_ind','diff'] 
    dets_cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind', 'img_ind']
    
    # two objects of different classes in the image, perfect detection
    arr = np.array(
        [ [0,0,10,10,0,0,0],
          [10,10,10,10,1,0,0] ])
    gt = Table(arr,cols)

    dets_arr = np.array(
        [ [0,0,10,10,-1,-1,0,0],
          [10,10,10,10,-1,-1,1,0] ]) 
    dets = Table(dets_arr,dets_cols)
    
    # make sure gt and gt_cols aren't modified
    gt_arr_copy = gt.arr.copy()
    gt_cols_copy = list(gt.cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    assert(np.all(gt.arr == gt_arr_copy))
    assert(gt_cols_copy == gt.cols)

    correct_ap = 1
    correct_rec = np.array([0.5,1])
    correct_prec = np.array([1,1])
    print((ap, rec, prec))
    assert(correct_ap == ap)
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # some extra detections to generate false positives
    dets_arr = np.array(
        [ [0,0,10,10,-1,-1,0,0],
          [0,0,10,10,0,-1,0,0],
          [10,10,10,10,0,-1,1,0],
          [10,10,10,10,-1,-1,1,0] ]) 
    dets = Table(dets_arr,dets_cols)

    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.5,1,1,1])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # confirm that running on the same dets gives the same answer
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.5,1,1,1])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # now let's add two objects of a different class to gt to lower recall
    arr = np.array(
        [ [0,0,10,10,0,0,0],
          [10,10,10,10,1,0,0],
          [20,20,10,10,2,0,0],
          [30,30,10,10,2,0,0] ])
    gt = Table(arr,cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_rec = np.array([0.25,0.5,0.5,0.5])
    correct_prec = np.array([1,1,2./3,0.5])
    print((ap, rec, prec))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))

    # now call it with empty detections
    dets_arr = np.array([])
    dets = Table(dets_arr,dets_cols)
    ap,rec,prec = self.evaluation.compute_det_pr(dets, gt)
    correct_ap = 0
    correct_rec = np.array([0])
    correct_prec = np.array([0])
    print((ap, rec, prec))
    assert(np.all(correct_ap==ap))
    assert(np.all(correct_rec==rec))
    assert(np.all(correct_prec==prec))
  
  def test_compute_cls_map(self):
    res = Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt)
    assert(round(res,11) == 0.47206391958)
    
  def test_compute_cls_map_half(self):
    table_csc_half = Table()
    table_csc_half.cols = list(self.csc_trainval.cols)
    for _ in range(10):
      rand_inds = np.random.permutation(range(5011))[:2500]
      table_csc_half.arr = self.csc_trainval.arr[rand_inds,:]      
      res = Evaluation.compute_cls_map(table_csc_half, self.trainval_gt)
      assert(round(res,6) > .45)
  
  def test_compute_cls_map_gt(self):
    res = Evaluation.compute_cls_map(self.trainval_gt, self.trainval_gt)
    assert(round(res,6) == 1)
    
  def test_compute_cls_map_gt_half(self):
    rand_inds = np.random.permutation(range(5011))[:2500]
    table_gt_half = Table()
    table_gt_half.arr = np.hstack((self.trainval_gt.arr,np.array(np.arange(5011), ndmin=2).T))
    table_gt_half.arr = table_gt_half.arr[rand_inds,:]
    table_gt_half.cols = list(self.trainval_gt.cols) + ['img_ind']
    res = Evaluation.compute_cls_map(table_gt_half, self.trainval_gt)
    assert(round(res,6) == 1)
  
  def test_compute_cls_map_random_clf(self):
    clf_table = Table()
    num_test = 10
    ress = np.zeros((num_test,))
    for idx in range(num_test):
      clf_table.arr = np.hstack((np.random.rand(5011, 20),np.array(np.arange(5011), ndmin=2).T))
      clf_table.cols = list(self.trainval_gt.cols) + ['img_ind']
      res = Evaluation.compute_cls_map(clf_table, self.trainval_gt)
      ress[idx] = res
    assert(np.mean(ress) < 0.09)
  
  def test_other_scores(self):
    print 'csc_test', Evaluation.compute_cls_map(self.csc_test, self.test_gt)
    print 'csc_trainval', Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt)
    
    print 'ext_test', Evaluation.compute_cls_map(self.ext_csc_test, self.test_gt)
    print 'ext_trainval', Evaluation.compute_cls_map(self.ext_csc_trainval, self.trainval_gt)
    
    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_chi2_20')
    ext_table_chi2_20 = cPickle.load(open(filename, 'r'))
    print 'ext_chi2_20_test', Evaluation.compute_cls_map(ext_table_chi2_20, self.test_gt)    
    
    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_chi2_20')
    ext_table_chi2_20_tv = cPickle.load(open(filename, 'r'))
    print 'ext_chi2_20_trainval', Evaluation.compute_cls_map(ext_table_chi2_20_tv, self.trainval_gt)
    
    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_rbf_20')
    ext_table_rbf_20 = cPickle.load(open(filename, 'r'))
    print 'ext_rbf_20_test', Evaluation.compute_cls_map(ext_table_rbf_20, self.test_gt)    
    
    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_rbf_20')
    ext_table_rbf_20_tv = cPickle.load(open(filename, 'r'))
    print 'ext_rbf_20_trainval', Evaluation.compute_cls_map(ext_table_rbf_20_tv, self.trainval_gt)
       
    filename = os.path.join(config.get_ext_dets_foldname(self.d_test), 'dp', 'table_linear_20')
    ext_linear_20_test = cPickle.load(open(filename, 'r'))
    print 'ext_linear_test', Evaluation.compute_cls_map(ext_linear_20_test, self.test_gt)
    
    filename = os.path.join(config.get_ext_dets_foldname(self.d_train), 'dp', 'table_linear_20')
    ext_table_linear_20 = cPickle.load(open(filename, 'r'))
    print 'ext_linear_20_trainval', Evaluation.compute_cls_map(ext_table_linear_20, self.trainval_gt)    
        
    filename = 'tab_linear_5'
    ext_tab_lin_5 = cPickle.load(open(filename, 'r'))
    print 'ext_tab_lin_5_trainval', Evaluation.compute_cls_map(ext_tab_lin_5, self.trainval_gt)    
Example #5
0
class TestEvaluationPerfect:
    def __init__(self):
        self.csc_trainval = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'csc_trainval'), 'r'))
        self.csc_test = cPickle.load(
            open(os.path.join(config.get_ext_test_support_dir(), 'csc_test'),
                 'r'))
        self.ext_csc_test = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'ext_csc_test'), 'r'))
        self.ext_csc_trainval = cPickle.load(
            open(
                os.path.join(config.get_ext_test_support_dir(),
                             'ext_csc_trainval'), 'r'))
        self.d_train = Dataset('full_pascal_trainval')
        self.trainval_gt = self.d_train.get_cls_ground_truth()
        self.d_test = Dataset('full_pascal_test')
        self.test_gt = self.d_test.get_cls_ground_truth()

    def setup(self):
        train_dataset = Dataset('test_pascal_train', force=True)
        dataset = Dataset('test_pascal_val', force=True)
        self.dp = DatasetPolicy(dataset, train_dataset, detector='perfect')
        self.evaluation = Evaluation(self.dp)

    def test_compute_pr_multiclass(self):
        cols = ['x', 'y', 'w', 'h', 'cls_ind', 'img_ind', 'diff']
        dets_cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind', 'img_ind']

        # two objects of different classes in the image, perfect detection
        arr = np.array([[0, 0, 10, 10, 0, 0, 0], [10, 10, 10, 10, 1, 0, 0]])
        gt = Table(arr, cols)

        dets_arr = np.array([[0, 0, 10, 10, -1, -1, 0, 0],
                             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        # make sure gt and gt_cols aren't modified
        gt_arr_copy = gt.arr.copy()
        gt_cols_copy = list(gt.cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        assert (np.all(gt.arr == gt_arr_copy))
        assert (gt_cols_copy == gt.cols)

        correct_ap = 1
        correct_rec = np.array([0.5, 1])
        correct_prec = np.array([1, 1])
        print((ap, rec, prec))
        assert (correct_ap == ap)
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # some extra detections to generate false positives
        dets_arr = np.array([[0, 0, 10, 10, -1, -1, 0, 0],
                             [0, 0, 10, 10, 0, -1, 0, 0],
                             [10, 10, 10, 10, 0, -1, 1, 0],
                             [10, 10, 10, 10, -1, -1, 1, 0]])
        dets = Table(dets_arr, dets_cols)

        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # confirm that running on the same dets gives the same answer
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.5, 1, 1, 1])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # now let's add two objects of a different class to gt to lower recall
        arr = np.array([[0, 0, 10, 10, 0, 0, 0], [10, 10, 10, 10, 1, 0, 0],
                        [20, 20, 10, 10, 2, 0, 0], [30, 30, 10, 10, 2, 0, 0]])
        gt = Table(arr, cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_rec = np.array([0.25, 0.5, 0.5, 0.5])
        correct_prec = np.array([1, 1, 2. / 3, 0.5])
        print((ap, rec, prec))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

        # now call it with empty detections
        dets_arr = np.array([])
        dets = Table(dets_arr, dets_cols)
        ap, rec, prec = self.evaluation.compute_det_pr(dets, gt)
        correct_ap = 0
        correct_rec = np.array([0])
        correct_prec = np.array([0])
        print((ap, rec, prec))
        assert (np.all(correct_ap == ap))
        assert (np.all(correct_rec == rec))
        assert (np.all(correct_prec == prec))

    def test_compute_cls_map(self):
        res = Evaluation.compute_cls_map(self.csc_trainval, self.trainval_gt)
        assert (round(res, 11) == 0.47206391958)

    def test_compute_cls_map_half(self):
        table_csc_half = Table()
        table_csc_half.cols = list(self.csc_trainval.cols)
        for _ in range(10):
            rand_inds = np.random.permutation(range(5011))[:2500]
            table_csc_half.arr = self.csc_trainval.arr[rand_inds, :]
            res = Evaluation.compute_cls_map(table_csc_half, self.trainval_gt)
            assert (round(res, 6) > .45)

    def test_compute_cls_map_gt(self):
        res = Evaluation.compute_cls_map(self.trainval_gt, self.trainval_gt)
        assert (round(res, 6) == 1)

    def test_compute_cls_map_gt_half(self):
        rand_inds = np.random.permutation(range(5011))[:2500]
        table_gt_half = Table()
        table_gt_half.arr = np.hstack(
            (self.trainval_gt.arr, np.array(np.arange(5011), ndmin=2).T))
        table_gt_half.arr = table_gt_half.arr[rand_inds, :]
        table_gt_half.cols = list(self.trainval_gt.cols) + ['img_ind']
        res = Evaluation.compute_cls_map(table_gt_half, self.trainval_gt)
        assert (round(res, 6) == 1)

    def test_compute_cls_map_random_clf(self):
        clf_table = Table()
        num_test = 10
        ress = np.zeros((num_test, ))
        for idx in range(num_test):
            clf_table.arr = np.hstack(
                (np.random.rand(5011, 20), np.array(np.arange(5011),
                                                    ndmin=2).T))
            clf_table.cols = list(self.trainval_gt.cols) + ['img_ind']
            res = Evaluation.compute_cls_map(clf_table, self.trainval_gt)
            ress[idx] = res
        assert (np.mean(ress) < 0.09)

    def test_other_scores(self):
        print 'csc_test', Evaluation.compute_cls_map(self.csc_test,
                                                     self.test_gt)
        print 'csc_trainval', Evaluation.compute_cls_map(
            self.csc_trainval, self.trainval_gt)

        print 'ext_test', Evaluation.compute_cls_map(self.ext_csc_test,
                                                     self.test_gt)
        print 'ext_trainval', Evaluation.compute_cls_map(
            self.ext_csc_trainval, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_chi2_20')
        ext_table_chi2_20 = cPickle.load(open(filename, 'r'))
        print 'ext_chi2_20_test', Evaluation.compute_cls_map(
            ext_table_chi2_20, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_chi2_20')
        ext_table_chi2_20_tv = cPickle.load(open(filename, 'r'))
        print 'ext_chi2_20_trainval', Evaluation.compute_cls_map(
            ext_table_chi2_20_tv, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_rbf_20')
        ext_table_rbf_20 = cPickle.load(open(filename, 'r'))
        print 'ext_rbf_20_test', Evaluation.compute_cls_map(
            ext_table_rbf_20, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_rbf_20')
        ext_table_rbf_20_tv = cPickle.load(open(filename, 'r'))
        print 'ext_rbf_20_trainval', Evaluation.compute_cls_map(
            ext_table_rbf_20_tv, self.trainval_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_test),
                                'dp', 'table_linear_20')
        ext_linear_20_test = cPickle.load(open(filename, 'r'))
        print 'ext_linear_test', Evaluation.compute_cls_map(
            ext_linear_20_test, self.test_gt)

        filename = os.path.join(config.get_ext_dets_foldname(self.d_train),
                                'dp', 'table_linear_20')
        ext_table_linear_20 = cPickle.load(open(filename, 'r'))
        print 'ext_linear_20_trainval', Evaluation.compute_cls_map(
            ext_table_linear_20, self.trainval_gt)

        filename = 'tab_linear_5'
        ext_tab_lin_5 = cPickle.load(open(filename, 'r'))
        print 'ext_tab_lin_5_trainval', Evaluation.compute_cls_map(
            ext_tab_lin_5, self.trainval_gt)
Example #6
0
  return table

def conv(d_train, table_arr):
  table = Table()
  #table_arr = cPickle.load(open('table_linear_5','r'))
  table.arr = np.hstack((table_arr, np.array(np.arange(table_arr.shape[0]),ndmin=2).T))
  table.cols = d_train.classes + ['img_ind']
  print table
  #cPickle.dump(table, open('tab_linear_5','w'))
  return table
  
if __name__ == '__main__':
  d_train = Dataset('full_pascal_trainval')
  d_val = Dataset('full_pascal_val')

  train_gt = d_train.get_cls_ground_truth()
  val_gt = d_val.get_cls_ground_truth()

  if comm_rank == 0:
    # Only the root rank builds the cross-validation output path.
    clf = CSCClassifier('default', 'dog', d_train, d_val)
    filename = os.path.join(
      config.get_classifier_dataset_dirname(clf, d_train), 'crossval.txt')

  # Hyperparameter grid: SVM kernel types x C values.
  kernels = ['linear']
  Cs = [50]

  settings = list(itertools.product(kernels, Cs))

  for setin in settings:
    kernel, C = setin
    #train_csc_svms(d_train, d_val, kernel, C)
Example #7
0
def conv(d_train, table_arr):
    table = Table()
    #table_arr = cPickle.load(open('table_linear_5','r'))
    table.arr = np.hstack(
        (table_arr, np.array(np.arange(table_arr.shape[0]), ndmin=2).T))
    table.cols = d_train.classes + ['img_ind']
    print table
    #cPickle.dump(table, open('tab_linear_5','w'))
    return table


if __name__ == '__main__':
    d_train = Dataset('full_pascal_trainval')
    d_val = Dataset('full_pascal_val')

    train_gt = d_train.get_cls_ground_truth()
    val_gt = d_val.get_cls_ground_truth()

    if comm_rank == 0:
        # Only the MPI root rank builds the cross-validation output path.
        # NOTE(review): filename is never used in the visible code — confirm
        # whether downstream (truncated) code consumes it.
        filename = os.path.join(
            config.get_classifier_dataset_dirname(
                CSCClassifier('default', 'dog', d_train, d_val), d_train),
            'crossval.txt')

    # Hyperparameter grid: SVM kernel types x C values.
    kernels = ['linear']
    Cs = [50]

    settings = list(itertools.product(kernels, Cs))

    for setin in settings:
        # NOTE(review): loop body appears truncated here (the parallel
        # example also unpacks C and calls train_csc_svms).
        kernel = setin[0]
Example #8
0
class TestDatasetJson:
  """Exercises Dataset loading, ground-truth tables, and class values."""

  def setup(self):
    # A tiny 4-image, 3-class fixture; force=True rebuilds cached state.
    self.d = Dataset('test_data1',force=True)
    self.classes = ["A","B","C"]

  def test_load(self):
    """Dataset reports the fixture's image count and class list."""
    assert(self.d.num_images() == 4)
    assert(self.d.classes == self.classes)

  def test_get_det_gt(self):
    """Full detection ground truth matches the expected Table."""
    gt = self.d.get_det_gt(with_diff=True,with_trun=False)
    df = Table(
      np.array([[ 0.,  0.,  0.,  0.,  0.,  0, 0, 0.],
       [ 1.,  1.,  1.,  1.,  1.,  0, 0, 0.],
       [ 1.,  1.,  1.,  0.,  0.,  0, 0, 1.],
       [ 0.,  0.,  0.,  0.,  1.,  0, 0, 2.],
       [ 0.,  0.,  0.,  0.,  2.,  0, 0, 3.],
       [ 1.,  1.,  1.,  1.,  2.,  0, 0, 3.]]),
       ['x','y','w','h','cls_ind','diff','trun','img_ind'])
    print(gt)
    print(df)
    assert(gt == df)

  def test_get_cls_counts_json(self):
    """Per-image class counts: rows are images, columns are classes."""
    arr = np.array(
      [ [ 1, 1, 0],
        [ 1, 0, 0],
        [ 0, 1, 0],
        [ 0, 0, 2]])
    print(self.d.get_cls_counts())
    assert(np.all(self.d.get_cls_counts() == arr))

  def test_get_cls_ground_truth_json(self):
    """Boolean class-presence table per image."""
    table = Table(
      np.array([ [ True, True, False],
        [ True, False, False],
        [ False, True, False],
        [ False, False, True] ]), ["A","B","C"])
    assert(self.d.get_cls_ground_truth()==table)

  def test_det_ground_truth_for_class_json(self):
    """Class-"A" detections are the same with and without diff/trun."""
    gt = self.d.get_det_gt_for_class("A",with_diff=True,with_trun=True)
    arr = np.array(
      [[ 0.,  0.,  0.,  0.,  0., 0., 0, 0.],
       [ 1.,  1.,  1.,  0.,  0., 0., 0., 1.]])
    cols = ['x','y','w','h','cls_ind','diff','trun','img_ind']
    print(gt.arr)
    assert(np.all(gt.arr == arr))
    assert(gt.cols == cols)

    # no diff or trun
    gt = self.d.get_det_gt_for_class("A",with_diff=False,with_trun=False)
    arr = np.array(
      [[ 0.,  0.,  0.,  0.,  0., 0., 0, 0.],
       [ 1.,  1.,  1.,  0.,  0., 0., 0., 1.]])
    cols = ['x','y','w','h','cls_ind','diff','trun','img_ind']
    print(gt.arr)
    assert(np.all(gt.arr == arr))
    assert(gt.cols == cols)

  def test_set_values(self):
    """Values default to uniform; 'inverse_prior' reweights by class rarity."""
    assert(np.all(self.d.values == 1/3. * np.ones(len(self.classes))))
    self.d.set_values('uniform')
    assert(np.all(self.d.values == 1/3. * np.ones(len(self.classes))))
    self.d.set_values('inverse_prior')
    print(self.d.values)
    assert(np.all(self.d.values == np.array([0.25,0.25,0.5])))