Example #1
0
  def test_load_dpm_detections(self):
    """Check that externally-computed DPM detections load identically to the
    reference detections produced in Matlab
    (timely/data/test_support/concat_dets.m)."""
    cfg = dict(self.config)
    cfg['detectors'] = ['dpm']
    policy = DatasetPolicy(self.dataset, self.train_dataset, **cfg)
    assert policy.detectors == ['dpm']
    dets = policy.load_ext_detections(self.dataset, 'dpm_may25', force=True)
    dets = dets.with_column_omitted('time')

    # Load the reference detections and convert them to our representation.
    mat_path = os.path.join(config.test_support_dir, 'val_dets.mat')
    raw = scipy.io.loadmat(mat_path)['dets']
    expected = Table(
        raw,
        ['x1','y1','x2','y2','dummy','dummy','dummy','dummy','score','cls_ind','img_ind'],
        'dets_correct')
    expected = expected.subset(
        ['x1','y1','x2','y2','score','cls_ind','img_ind'])
    # Matlab coordinates are 1-indexed; shift to 0-indexed.
    expected.arr[:,:4] -= 1
    # Corners (x1,y1,x2,y2) -> (x,y,w,h).
    expected.arr[:,:4] = BoundingBox.convert_arr_from_corners(
        expected.arr[:,:4])
    expected.cols = ['x','y','w','h','score','cls_ind','img_ind']

    print('----mine:')
    print(dets)
    print('----correct:')
    print(expected)
    assert expected == dets
Example #2
0
    def test_load_dpm_detections(self):
        """Verify DPM detections loaded via DatasetPolicy match the reference
        detections concatenated in Matlab
        (timely/data/test_support/concat_dets.m)."""
        cfg = dict(self.config)
        cfg['detectors'] = ['dpm']
        policy = DatasetPolicy(self.dataset, self.train_dataset, **cfg)
        assert policy.detectors == ['dpm']
        mine = policy.load_ext_detections(self.dataset, 'dpm_may25', force=True)
        mine = mine.with_column_omitted('time')

        # Load the reference table and bring it into our representation.
        mat_file = os.path.join(config.test_support_dir, 'val_dets.mat')
        correct = Table(
            scipy.io.loadmat(mat_file)['dets'],
            ['x1', 'y1', 'x2', 'y2', 'dummy', 'dummy', 'dummy', 'dummy',
             'score', 'cls_ind', 'img_ind'],
            'dets_correct')
        correct = correct.subset(
            ['x1', 'y1', 'x2', 'y2', 'score', 'cls_ind', 'img_ind'])
        # Matlab coordinates are 1-indexed; shift to 0-indexed.
        correct.arr[:, :4] -= 1
        # Corners (x1,y1,x2,y2) -> (x,y,w,h).
        correct.arr[:, :4] = BoundingBox.convert_arr_from_corners(
            correct.arr[:, :4])
        correct.cols = ['x', 'y', 'w', 'h', 'score', 'cls_ind', 'img_ind']

        print('----mine:')
        print(mine)
        print('----correct:')
        print(correct)
        assert correct == mine
Example #3
0
    def compute_det_pr_and_hard_neg(cls, dets, gt):
        """
        Take Table of detections and Table of ground truth.
        Ground truth can be for a single image or a whole dataset
        and can contain either all classes or just one class (but the cls_ind
        col must be present in either case).
        Depending on these decisions, the meaning of the PR evaluation is
        different.
        In particular, if gt is for a single class but dets are for multiple
        classes, there will be a lot of false positives!
        NOTE: modifies dets in-place (sorts by score)
        Return (ap, rec, prec, hard_neg) as a tuple.
        """
        gt = gt.copy()

        # If dets or gt are empty, return zero results.
        nd = dets.arr.shape[0]
        if nd < 1 or gt.shape[0] < 1:
            # BUGFIX: this path used to return a 3-tuple, inconsistent with
            # the 4-tuple returned below — callers unpacking four values
            # would crash on empty input.
            return (0, np.array([0]), np.array([0]), np.zeros(nd))
        tt = ut.TicToc().tic()

        # Augment gt with a column keeping track of matches.
        cols = list(gt.cols) + ['matched']
        arr = np.zeros((gt.arr.shape[0], gt.arr.shape[1] + 1))
        arr[:, :-1] = gt.arr
        gt = Table(arr, cols)

        # Sort detections by confidence.
        dets.sort_by_column('score', descending=True)

        # npos: number of non-difficult ground-truth objects (recall
        # denominator).
        npos = gt.filter_on_column('diff', 0).shape[0]
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        hard_neg = np.zeros(nd)
        for d in range(nd):
            # Progress report roughly every 15 seconds.
            if tt.qtoc() > 15:
                print("... on %d/%d dets" % (d, nd))
                tt.tic()

            det = dets.arr[d, :]

            # Find ground truth for this image.
            if 'img_ind' in gt.cols:
                img_ind = det[dets.ind('img_ind')]
                inds = gt.arr[:, gt.ind('img_ind')] == img_ind
                gt_for_image = gt.arr[inds, :]
            else:
                gt_for_image = gt.arr

            if gt_for_image.shape[0] < 1:
                # This can happen if we're passing ground truth for a class:
                # false positive due to detection in an image that does not
                # contain the class.
                fp[d] = 1
                hard_neg[d] = 1
                continue

            # Find the maximally overlapping ground truth element for this
            # detection.
            overlaps = BoundingBox.get_overlap(gt_for_image[:, :4], det[:4])
            jmax = overlaps.argmax()
            ovmax = overlaps[jmax]

            # Assign detection as true positive/don't care/false positive.
            if ovmax >= cls.MIN_OVERLAP:
                if gt_for_image[jmax, gt.ind('diff')]:
                    # NOT a false positive because object is difficult!
                    pass
                elif gt_for_image[jmax, gt.ind('matched')] == 0:
                    if gt_for_image[jmax, gt.ind('cls_ind')] == det[
                            dets.ind('cls_ind')]:
                        # True positive.
                        tp[d] = 1
                        gt_for_image[jmax, gt.ind('matched')] = 1
                    else:
                        # False positive due to wrong class.
                        fp[d] = 1
                        hard_neg[d] = 1
                else:
                    # False positive due to multiple detection; this is still
                    # a correct answer, so not a hard negative.
                    fp[d] = 1
            else:
                # False positive due to not matching any ground truth object.
                fp[d] = 1
                hard_neg[d] = 1
            # NOTE: this is very important: boolean fancy indexing copies, so
            # otherwise gt.arr does not get the changes we make to
            # gt_for_image.
            if 'img_ind' in gt.cols:
                gt.arr[inds, :] = gt_for_image

        ap, rec, prec = cls.compute_rec_prec_ap(tp, fp, npos)
        return (ap, rec, prec, hard_neg)
Example #4
0
  def compute_det_pr_and_hard_neg(cls, dets, gt):
    """
    Take Table of detections and Table of ground truth.
    Ground truth can be for a single image or a whole dataset
    and can contain either all classes or just one class (but the cls_ind col
    must be present in either case).
    Depending on these decisions, the meaning of the PR evaluation is
    different.
    In particular, if gt is for a single class but dets are for multiple
    classes, there will be a lot of false positives!
    NOTE: modifies dets in-place (sorts by score)
    Return (ap, rec, prec, hard_neg) as a tuple.
    """
    gt = gt.copy()

    # if dets or gt are empty, return zero results
    nd = dets.arr.shape[0]
    if nd < 1 or gt.shape[0] < 1:
      # BUGFIX: this path used to return a 3-tuple, inconsistent with the
      # 4-tuple returned below — callers unpacking four values would crash
      # on empty input.
      return (0, np.array([0]), np.array([0]), np.zeros(nd))
    tt = ut.TicToc().tic()

    # augment gt with a column keeping track of matches
    cols = list(gt.cols) + ['matched']
    arr = np.zeros((gt.arr.shape[0], gt.arr.shape[1]+1))
    arr[:,:-1] = gt.arr
    gt = Table(arr, cols)

    # sort detections by confidence
    dets.sort_by_column('score', descending=True)

    # npos: number of non-difficult ground-truth objects (recall denominator)
    npos = gt.filter_on_column('diff', 0).shape[0]
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    hard_neg = np.zeros(nd)
    for d in range(nd):
      # progress report roughly every 15 seconds
      if tt.qtoc() > 15:
        print("... on %d/%d dets" % (d, nd))
        tt.tic()

      det = dets.arr[d,:]

      # find ground truth for this image
      if 'img_ind' in gt.cols:
        img_ind = det[dets.ind('img_ind')]
        inds = gt.arr[:, gt.ind('img_ind')] == img_ind
        gt_for_image = gt.arr[inds,:]
      else:
        gt_for_image = gt.arr

      if gt_for_image.shape[0] < 1:
        # this can happen if we're passing ground truth for a class:
        # false positive due to detection in image that does not contain
        # the class
        fp[d] = 1
        hard_neg[d] = 1
        continue

      # find the maximally overlapping ground truth element for this detection
      overlaps = BoundingBox.get_overlap(gt_for_image[:,:4], det[:4])
      jmax = overlaps.argmax()
      ovmax = overlaps[jmax]

      # assign detection as true positive/don't care/false positive
      if ovmax >= cls.MIN_OVERLAP:
        if gt_for_image[jmax, gt.ind('diff')]:
          # NOT a false positive because object is difficult!
          pass
        elif gt_for_image[jmax, gt.ind('matched')] == 0:
          if gt_for_image[jmax, gt.ind('cls_ind')] == det[dets.ind('cls_ind')]:
            # true positive
            tp[d] = 1
            gt_for_image[jmax, gt.ind('matched')] = 1
          else:
            # false positive due to wrong class
            fp[d] = 1
            hard_neg[d] = 1
        else:
          # false positive due to multiple detection;
          # this is still a correct answer, so not a hard negative
          fp[d] = 1
      else:
        # false positive due to not matching any ground truth object
        fp[d] = 1
        hard_neg[d] = 1
      # NOTE: this is very important: boolean fancy indexing copies, so
      # otherwise gt.arr does not get the changes we make to gt_for_image
      if 'img_ind' in gt.cols:
        gt.arr[inds,:] = gt_for_image

    ap, rec, prec = cls.compute_rec_prec_ap(tp, fp, npos)
    return (ap, rec, prec, hard_neg)