Example #1
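Computes per-frame IoU and center error between tracker predictions and ground-truth annotations, skipping frames whose annotation contains NaN values.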
    def _calc_metrics(self, bboxes, anno):
        # a frame is valid only if its annotation contains no NaN entries
        valid = ~np.any(np.isnan(anno), axis=1)
        if not np.any(valid):
            ops.sys_print('Warning: no valid annotations')
            return None, None
        else:
            ious = ops.rect_iou(bboxes[valid, :], anno[valid, :])
            center_errors = ops.center_error(bboxes[valid, :], anno[valid, :])
            return ious, center_errors
Example #2
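Reshapes a flat prediction array into 100 candidate boxes per annotation and, for each annotation, returns the candidate (among the first 15) with the highest IoU, together with its index.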
    def convert_to_best_bboxes(self, bboxes, annos):
        best_bboxes = []
        max_indexes = []
        # group the flat predictions into 100 candidate boxes per annotation
        bboxes = np.reshape(bboxes, (-1, 100, 4))
        for bbox, anno in zip(bboxes, annos):
            # consider only the first 15 candidates and keep the one with the
            # highest IoU against the ground-truth annotation
            bbox = bbox[:15, :]
            ious = ops.rect_iou(bbox, anno)
            max_index = ious.argmax()
            max_indexes.append(max_index)
            best_bboxes.append(bbox[max_index, :])
        return np.array(best_bboxes), np.array(max_indexes)
Example #3
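A unit test checking that rect_iou and poly_iou agree on random rectangles, both without a bound and when clipped to an image bound, and that poly_iou returns the same values when the rectangles are passed in corner (polygon) form.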
    def test_iou(self):
        r1 = np.random.rand(1000, 4) * 100
        r2 = np.random.rand(1000, 4) * 100
        r1[:, 2:] += r1[:, :2] - 1
        r2[:, 2:] += r2[:, :2] - 1

        for bound in [None, (50, 100), (100, 200)]:
            o1 = ops.rect_iou(r1, r2, bound=bound)
            o2 = ops.poly_iou(r1, r2, bound=bound)
            self.assertTrue(np.abs(o1 - o2).max() < 1e-12)

            p1 = self._to_corner(r1)
            p2 = self._to_corner(r2)
            o3 = ops.poly_iou(p1, p2, bound=bound)
            self.assertTrue(np.abs(o1 - o3).max() < 1e-12)
Example #4
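The report method of a GOT-10k experiment class: on the test subset it only compresses the raw results for upload to the evaluation server; on the validation subset it loads every repetition's result files, computes per-sequence and overall AO, SR and speed from rect_iou values over visible frames, and writes the report to performance.json.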
    def report(self, tracker_names, plot_curves=False):
        if isinstance(tracker_names, str):
            tracker_names = [tracker_names]
        assert isinstance(tracker_names, (list, tuple))

        if self.dataset.subset == 'test':
            pwd = os.getcwd()

            # generate compressed submission file for each tracker
            for tracker_name in tracker_names:
                # compress all tracking results
                result_dir = osp.join(self.result_dir, tracker_name)
                os.chdir(result_dir)
                save_file = '../%s' % tracker_name
                ops.compress('.', save_file)
                ops.sys_print('Records saved at %s' % (save_file + '.zip'))

            # print submission guides
            ops.sys_print('\033[93mLogin and follow instructions on')
            ops.sys_print('http://got-10k.aitestunion.com/submit_instructions')
            ops.sys_print('to upload and evaluate your tracking results\033[0m')

            # switch back to previous working directory
            os.chdir(pwd)

            return None
        elif self.dataset.subset == 'val':
            # assume tracker_names[0] is your tracker
            report_dir = osp.join(self.report_dir, tracker_names[0])
            if not osp.exists(report_dir):
                os.makedirs(report_dir)
            report_file = osp.join(report_dir, 'performance.json')

            # visible ratios of all sequences
            seq_names = self.dataset.seq_names
            covers = {s: self.dataset[s][1]['meta']['cover'][1:]
                      for s in seq_names}

            performance = {}
            for name in tracker_names:
                ops.sys_print('Evaluating %s' % name)
                ious = {}
                times = {}
                performance.update({name: {
                    'overall': {},
                    'seq_wise': {}}})

                for s, (_, target) in enumerate(self.dataset):
                    seq_name = self.dataset.seq_names[s]
                    anno, meta = target['anno'], target['meta']

                    record_files = glob.glob(osp.join(
                        self.result_dir, name, seq_name,
                        '%s_[0-9]*.txt' % seq_name))
                    if len(record_files) == 0:
                        raise Exception('Results for sequence %s not found.' % seq_name)

                    # read results of all repetitions
                    bboxes = [np.loadtxt(f, delimiter=',') for f in record_files]
                    assert all([b.shape == anno.shape for b in bboxes])

                    # calculate and stack all ious
                    bound = ast.literal_eval(meta['resolution'])
                    seq_ious = [ops.rect_iou(
                        b[1:], anno[1:], bound=bound) for b in bboxes]
                    # only consider valid frames where targets are visible
                    seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]
                    seq_ious = np.concatenate(seq_ious)
                    ious[seq_name] = seq_ious

                    # stack all tracking times
                    times[seq_name] = []
                    seq_times = np.array([])  # fallback in case no time file exists
                    time_file = osp.join(
                        self.result_dir, name, seq_name,
                        '%s_time.txt' % seq_name)
                    if osp.exists(time_file):
                        seq_times = np.loadtxt(time_file, delimiter=',')
                        seq_times = seq_times[~np.isnan(seq_times)]
                        seq_times = seq_times[seq_times > 0]
                        if len(seq_times) > 0:
                            times[seq_name] = seq_times

                    # store sequence-wise performance
                    ao, sr, speed, _ = self._evaluate(seq_ious, seq_times)
                    performance[name]['seq_wise'].update({seq_name: {
                        'ao': ao,
                        'sr': sr,
                        'speed_fps': speed,
                        'length': len(anno) - 1}})

                ious = np.concatenate(list(ious.values()))
                times = np.concatenate(list(times.values()))

                # store overall performance
                ao, sr, speed, succ_curve = self._evaluate(ious, times)
                performance[name].update({'overall': {
                    'ao': ao,
                    'sr': sr,
                    'speed_fps': speed,
                    'succ_curve': succ_curve.tolist()}})

            # save performance
            with open(report_file, 'w') as f:
                json.dump(performance, f, indent=4)
            if plot_curves:
                # plot success curves
                self.plot_curves([report_file], tracker_names)

            return performance
Example #5
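A minimal variant of _calc_metrics that computes IoU and center error over all frames without any validity filtering; as the comment notes, child classes may override it.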
    def _calc_metrics(self, bboxes, anno):
        # can be modified by children classes
        ious = ops.rect_iou(bboxes, anno)
        center_errors = ops.center_error(bboxes, anno)
        return ious, center_errors
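
All of the examples call rect_iou on N x 4 box arrays. The following is a minimal standalone sketch of that call pattern, assuming (as in the GOT-10k toolkit) that boxes are given as (left, top, width, height) and that the helper functions live in got10k.utils.metrics; adapt the import if your ops module wraps them differently.

    import numpy as np
    # the examples above use an `ops` helper module; the same functions exist in the
    # GOT-10k toolkit's metrics module (assumed here, adjust to your installation)
    from got10k.utils import metrics as ops

    # predicted and ground-truth boxes as (left, top, width, height), one row per frame
    bboxes = np.array([[10., 20., 50., 40.],
                       [12., 22., 48., 38.]])
    anno = np.array([[11., 21., 50., 40.],
                     [30., 40., 50., 40.]])

    ious = ops.rect_iou(bboxes, anno)               # per-frame overlap in [0, 1]
    center_errors = ops.center_error(bboxes, anno)  # per-frame center distance in pixels

    # optionally clip both box sets to the image resolution (width, height) before
    # computing the overlaps, as Example #4 does with meta['resolution']
    ious_bounded = ops.rect_iou(bboxes, anno, bound=(640, 480))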