Example #1
    def test_load_dpm_detections(self):
        conf = dict(self.config)
        conf['detectors'] = ['dpm']
        policy = DatasetPolicy(self.dataset, self.train_dataset, **conf)
        assert (policy.detectors == ['dpm'])
        dets = policy.load_ext_detections(self.dataset,
                                          'dpm_may25',
                                          force=True)
        dets = dets.with_column_omitted('time')

        # load the ground truth dets, processed in Matlab
        # (timely/data/test_support/concat_dets.m)
        filename = os.path.join(config.test_support_dir, 'val_dets.mat')
        dets_correct = Table(
            scipy.io.loadmat(filename)['dets'], [
                'x1', 'y1', 'x2', 'y2', 'dummy', 'dummy', 'dummy', 'dummy',
                'score', 'cls_ind', 'img_ind'
            ], 'dets_correct')
        dets_correct = dets_correct.subset(
            ['x1', 'y1', 'x2', 'y2', 'score', 'cls_ind', 'img_ind'])
        dets_correct.arr[:, :4] -= 1
        dets_correct.arr[:, :4] = BoundingBox.convert_arr_from_corners(
            dets_correct.arr[:, :4])
        dets_correct.cols = ['x', 'y', 'w', 'h', 'score', 'cls_ind', 'img_ind']

        print('----mine:')
        print(dets)
        print('----correct:')
        print(dets_correct)
        assert (dets_correct == dets)
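
The fixture above is converted from Matlab's 1-indexed corner coordinates to 0-indexed (x, y, w, h) rows before comparison. A minimal NumPy sketch of that step, assuming the inclusive-pixel convention w = x2 - x1 + 1; the project's BoundingBox.convert_arr_from_corners may use a different convention:

import numpy as np

def corners_to_xywh(arr):
    # Return a copy with [x1, y1, x2, y2] rows converted to [x, y, w, h].
    # Sketch only; assumes inclusive pixel extents (w = x2 - x1 + 1).
    out = np.asarray(arr, dtype=float).copy()
    out[:, 2] = out[:, 2] - out[:, 0] + 1  # w
    out[:, 3] = out[:, 3] - out[:, 1] + 1  # h
    return out

# Matlab coordinates are 1-indexed, so subtract 1 first, as in the test.
dets_arr = np.array([[2.0, 3.0, 11.0, 23.0, 0.9]])  # x1, y1, x2, y2, score
dets_arr[:, :4] -= 1
dets_arr[:, :4] = corners_to_xywh(dets_arr[:, :4])
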
Example #2
    def test_load_dpm_detections(self):
        conf = dict(self.config)
        conf["detectors"] = ["dpm"]
        policy = DatasetPolicy(self.dataset, self.train_dataset, **conf)
        assert policy.detectors == ["dpm"]
        dets = policy.load_ext_detections(self.dataset, "dpm_may25", force=True)
        dets = dets.with_column_omitted("time")

        # load the ground truth dets, processed in Matlab
        # (timely/data/test_support/concat_dets.m)
        filename = os.path.join(config.test_support_dir, "val_dets.mat")
        dets_correct = Table(
            scipy.io.loadmat(filename)["dets"],
            ["x1", "y1", "x2", "y2", "dummy", "dummy", "dummy", "dummy", "score", "cls_ind", "img_ind"],
            "dets_correct",
        )
        dets_correct = dets_correct.subset(["x1", "y1", "x2", "y2", "score", "cls_ind", "img_ind"])
        dets_correct.arr[:, :4] -= 1
        dets_correct.arr[:, :4] = BoundingBox.convert_arr_from_corners(dets_correct.arr[:, :4])
        dets_correct.cols = ["x", "y", "w", "h", "score", "cls_ind", "img_ind"]

        print ("----mine:")
        print (dets)
        print ("----correct:")
        print (dets_correct)
        assert dets_correct == dets
Example #3
 def load_dpm_dets_for_image(cls,
                             image,
                             dataset,
                             suffix='dets_all_may25_DP'):
     """
     Loads multi-class array of detections for an image from .mat format.
     """
     t = time.time()
     name = os.path.splitext(image.name)[0]
     # TODO: figure out how to deal with different types of detections
     dets_dir = '/u/vis/x1/sergeyk/rl_detection/voc-release4/2007/tmp/dets_may25_DP'
     filename = os.path.join(dets_dir, '%s_dets_all_may25_DP.mat' % name)
     if not os.path.exists(filename):
         dets_dir = '/u/vis/x1/sergeyk/rl_detection/voc-release4/2007/tmp/dets_jun1_DP_trainval'
         filename = os.path.join(dets_dir,
                                 '%s_dets_all_jun1_DP_trainval.mat' % name)
         if not os.path.exists(filename):
             filename = os.path.join(config.test_support_dir,
                                     'dets/%s_dets_all_may25_DP.mat' % name)
             if not os.path.exists(filename):
                 print("File does not exist!")
                 return None
     mat = scipy.io.loadmat(filename)
     dets = mat['dets_mc']
     times = mat['times_mc']
     # feat_time = times[0,0]
     dets_seq = []
     cols = [
         'x1', 'y1', 'x2', 'y2', 'dummy', 'dummy', 'dummy', 'dummy',
         'score', 'time'
     ]
     for cls_ind, cls in enumerate(config.pascal_classes):
         cls_dets = dets[cls_ind][0]
         if cls_dets.shape[0] > 0:
             det_time = times[cls_ind, 1]
             # all detections get the final time
             cls_dets = ut.append_index_column(cls_dets, det_time)
             cls_dets = ut.append_index_column(cls_dets, cls_ind)
             # subtract 1 pixel and convert from corners!
             cls_dets[:, :4] -= 1
             cls_dets[:, :4] = BoundingBox.convert_arr_from_corners(
                 cls_dets[:, :4])
             dets_seq.append(cls_dets)
     cols = [
         'x', 'y', 'w', 'h', 'dummy', 'dummy', 'dummy', 'dummy', 'score',
         'time', 'cls_ind'
     ]
     # NMS detections per class individually
     dets_mc = ut.collect(dets_seq, Detector.nms_detections, {'cols': cols})
     dets_mc[:, :4] = BoundingBox.clipboxes_arr(
         dets_mc[:, :4], (0, 0, image.size[0] - 1, image.size[1] - 1))
     time_elapsed = time.time() - t
     print("On image %s, took %.3f s" % (image.name, time_elapsed))
     return dets_mc
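
The ut.collect(dets_seq, Detector.nms_detections, {'cols': cols}) call above runs NMS per class and stacks the per-class arrays into one multi-class array. A minimal sketch of that collect-and-stack pattern, with a hypothetical score-threshold filter standing in for the real Detector.nms_detections:

import numpy as np

def collect(arrays, func, kwargs=None):
    # Apply func to each per-class array and vertically stack the results.
    # Sketch only; the real ut.collect may differ in how it handles
    # empty inputs and keyword arguments.
    kwargs = kwargs or {}
    outputs = [func(arr, **kwargs) for arr in arrays]
    return np.vstack(outputs) if outputs else np.empty((0, 0))

def keep_high_scores(arr, cols):
    # Hypothetical stand-in for Detector.nms_detections.
    score_col = cols.index('score')
    return arr[arr[:, score_col] > 0.5]

per_class = [np.array([[0.0, 0.0, 10.0, 10.0, 0.9]]),
             np.array([[5.0, 5.0, 8.0, 8.0, 0.3]])]
dets_all = collect(per_class, keep_high_scores,
                   {'cols': ['x', 'y', 'w', 'h', 'score']})
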
Example #4
 def load_csc_dpm_dets_for_image(cls, image, dataset):
     """
     Loads HOS's cascaded dets.
     """
     t = time.time()
     name = os.path.splitext(image.name)[0]
     # if test dataset, use HOS's detections; if not, need to output my own
     if re.search('test', dataset.name):
         dirname = config.get_dets_test_wholeset_dir()
         filename = os.path.join(
             dirname,
             '%s_dets_all_test_original_cascade_wholeset.mat' % name)
     else:
         dirname = config.get_dets_nov19()
         filename = os.path.join(dirname, '%s_dets_all_nov19.mat' % name)
     print(filename)
     if not os.path.exists(filename):
         raise RuntimeError("File %s does not exist!" % filename)
     mat = scipy.io.loadmat(filename)
     dets = mat['dets_mc']
     times = mat['times_mc']
     # feat_time = times[0,0]
     dets_seq = []
     cols = [
         'x1', 'y1', 'x2', 'y2', 'dummy', 'dummy', 'dummy', 'dummy',
         'dummy', 'dummy', 'score'
     ]
     for cls_ind, cls in enumerate(dataset.classes):
         cls_dets = dets[cls_ind][0]
         if cls_dets.shape[0] > 0:
             good_ind = [0, 1, 2, 3, 10]
             cls_dets = cls_dets[:, good_ind]
             det_time = times[cls_ind, 1]
             # all detections get the final time
             cls_dets = ut.append_index_column(cls_dets, det_time)
             cls_dets = ut.append_index_column(cls_dets, cls_ind)
             # convert from corners!
             cls_dets[:, :4] = BoundingBox.convert_arr_from_corners(
                 cls_dets[:, :4])
             cls_dets[:, :4] = BoundingBox.clipboxes_arr(
                 cls_dets[:, :4], (0, 0, image.size[0], image.size[1]))
             dets_seq.append(cls_dets)
     cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind']
     dets_mc = ut.collect(dets_seq, Detector.nms_detections, {'cols': cols})
     time_elapsed = time.time() - t
     print("On image %s, took %.3f s" % (image.name, time_elapsed))
     return dets_mc
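
BoundingBox.clipboxes_arr above constrains each box to the image extent. A minimal sketch of such clipping, assuming (x, y, w, h) rows as in the code above; the project's implementation may treat box extents differently:

import numpy as np

def clip_boxes_xywh(arr, bounds):
    # Clip [x, y, w, h] rows to (min_x, min_y, max_x, max_y) bounds.
    # Sketch only; BoundingBox.clipboxes_arr may handle extents and
    # degenerate boxes differently.
    min_x, min_y, max_x, max_y = bounds
    out = np.asarray(arr, dtype=float).copy()
    x2 = np.clip(out[:, 0] + out[:, 2], min_x, max_x)
    y2 = np.clip(out[:, 1] + out[:, 3], min_y, max_y)
    out[:, 0] = np.clip(out[:, 0], min_x, max_x)
    out[:, 1] = np.clip(out[:, 1], min_y, max_y)
    out[:, 2] = x2 - out[:, 0]
    out[:, 3] = y2 - out[:, 1]
    return out

clipped = clip_boxes_xywh(np.array([[-5.0, 10.0, 50.0, 600.0]]),
                          (0, 0, 640, 480))
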